mirror of
https://github.com/go-gitea/gitea
synced 2024-12-22 16:47:57 +01:00
a915a09e4f
* Cleaning up public/ and documenting js/css libs. This commit mostly addresses #1484 by moving vendor'ed plugins into a vendor/ directory and documenting their upstream source and license in vendor/librejs.html. This also proves gitea is using only open source js/css libraries which helps toward reaching #1524. * Removing unused css file. The version of this file in use is located at: vendor/plugins/highlight/github.css * Cleaned up librejs.html and added javascript header A SafeJS function was added to templates/helper.go to allow keeping comments inside of javascript. A javascript comment was added in the header of templates/base/head.tmpl to mark all non-inline source as free. The librejs.html file was updated to meet the current librejs spec. I have now verified that the librejs plugin detects most of the scripts included in gitea and suspect the non-free detections are the result of a bug in the plugin. I believe this commit is enough to meet the C0.0 requirement of #1534. * Updating SafeJS function per lint suggestion * Added VERSIONS file, per request
253 lines
6.5 KiB
JavaScript
253 lines
6.5 KiB
JavaScript
// CodeMirror, copyright (c) by Marijn Haverbeke and others
|
|
// Distributed under an MIT license: http://codemirror.net/LICENSE
|
|
|
|
(function(mod) {
|
|
if (typeof exports == "object" && typeof module == "object") // CommonJS
|
|
mod(require("../../lib/codemirror"));
|
|
else if (typeof define == "function" && define.amd) // AMD
|
|
define(["../../lib/codemirror"], mod);
|
|
else // Plain browser env
|
|
mod(CodeMirror);
|
|
})(function(CodeMirror) {
|
|
"use strict";
|
|
|
|
CodeMirror.defineMode("oz", function (conf) {
|
|
|
|
// Compile a list of words into a regexp that matches any one of them
// as a complete word anchored at the start of the stream.
function wordRegexp(words) {
  var alternatives = words.join(")|(");
  return new RegExp("^((" + alternatives + "))\\b");
}
|
|
|
|
// Operator character classes; multi-character forms are tried
// longest-first by tokenBase so e.g. ":::" is never read as "::" + ":".
var singleOperators = /[\^@!\|<>#~\.\*\-\+\\/,=]/;
var doubleOperators = /(<-)|(:=)|(=<)|(>=)|(<=)|(<:)|(>:)|(=:)|(\\=)|(\\=:)|(!!)|(==)|(::)/;
var tripleOperators = /(:::)|(\.\.\.)|(=<:)|(>=:)/;

// Keywords occurring in the middle of a block, and the block closer.
var middle = [
  "in", "then", "else", "of", "elseof", "elsecase", "elseif", "catch",
  "finally", "with", "require", "prepare", "import", "export", "define", "do"
];
var end = ["end"];

// Word-anchored matchers built from the lists above.
var atoms = wordRegexp(["true", "false", "nil", "unit"]);
var commonKeywords = wordRegexp([
  "andthen", "at", "attr", "declare", "feat", "from", "lex",
  "mod", "mode", "orelse", "parser", "prod", "prop", "scanner", "self", "syn", "token"
]);
var openingKeywords = wordRegexp([
  "local", "proc", "fun", "case", "class", "if", "cond", "or", "dis",
  "choice", "not", "thread", "try", "raise", "lock", "for", "suchthat", "meth", "functor"
]);
var middleKeywords = wordRegexp(middle);
var endKeywords = wordRegexp(end);
|
|
|
|
// Tokenizers
|
|
// Main tokenizer: classifies one token starting at the stream position.
// Tracks block nesting depth in state.currentIndent and switches to the
// specialised tokenizers after "proc"/"fun", "class" and "meth" so their
// signatures get distinct styling.
function tokenBase(stream, state) {
  if (stream.eatSpace()) {
    return null;
  }

  // Brackets
  if (stream.match(/[{}]/)) {
    return "bracket";
  }

  // Special [] keyword
  if (stream.match(/(\[])/)) {
    return "keyword";
  }

  // Operators, longest form first
  if (stream.match(tripleOperators) || stream.match(doubleOperators)) {
    return "operator";
  }

  // Atoms
  if (stream.match(atoms)) {
    return "atom";
  }

  // Opening keywords bump the indent level, except when the opener is
  // preceded on the same line by a handled "do"-style keyword.
  var matched = stream.match(openingKeywords);
  if (matched) {
    if (!state.doInCurrentLine)
      state.currentIndent++;
    else
      state.doInCurrentLine = false;

    // Special matching for signatures
    if (matched[0] == "proc" || matched[0] == "fun")
      state.tokenize = tokenFunProc;
    else if (matched[0] == "class")
      state.tokenize = tokenClass;
    else if (matched[0] == "meth")
      state.tokenize = tokenMeth;

    return "keyword";
  }

  // Middle and other keywords
  if (stream.match(middleKeywords) || stream.match(commonKeywords)) {
    return "keyword";
  }

  // End keywords pop one indent level.
  if (stream.match(endKeywords)) {
    state.currentIndent--;
    return "keyword";
  }

  // Eat the next char for the comparisons below.
  var ch = stream.next();

  // Strings
  if (ch == '"' || ch == "'") {
    state.tokenize = tokenString(ch);
    return state.tokenize(stream, state);
  }

  // Numbers ("~" is the Oz unary minus and may prefix a literal)
  if (/[~\d]/.test(ch)) {
    if (ch == "~") {
      if (!/^[0-9]/.test(stream.peek()))
        return null;
      else if ((stream.next() == "0" && stream.match(/^[xX][0-9a-fA-F]+/)) || stream.match(/^[0-9]*(\.[0-9]+)?([eE][~+]?[0-9]+)?/))
        return "number";
    }

    if ((ch == "0" && stream.match(/^[xX][0-9a-fA-F]+/)) || stream.match(/^[0-9]*(\.[0-9]+)?([eE][~+]?[0-9]+)?/))
      return "number";

    return null;
  }

  // Comments: "%" to end of line, "/* ... */" for block comments.
  if (ch == "%") {
    stream.skipToEnd();
    return "comment";
  } else if (ch == "/") {
    if (stream.eat("*")) {
      state.tokenize = tokenComment;
      return tokenComment(stream, state);
    }
  }

  // Single operators
  if (singleOperators.test(ch)) {
    return "operator";
  }

  // If nothing matched, skip the entire alphanumerical block.
  stream.eatWhile(/\w/);

  return "variable";
}
|
|
|
|
// Tokenizer for the identifier right after "class": styles the class
// name (capitalised or back-quoted) then hands control back to tokenBase.
function tokenClass(stream, state) {
  if (stream.eatSpace()) {
    return null;
  }
  stream.match(/([A-Z][A-Za-z0-9_]*)|(`.+`)/);
  state.tokenize = tokenBase;
  return "variable-3";
}
|
|
|
|
// Tokenizer for the identifier right after "meth": styles the method
// name (identifier or back-quoted) then hands control back to tokenBase.
function tokenMeth(stream, state) {
  if (stream.eatSpace()) {
    return null;
  }
  stream.match(/([a-zA-Z][A-Za-z0-9_]*)|(`.+`)/);
  state.tokenize = tokenBase;
  return "def";
}
|
|
|
|
// Tokenizer for "proc"/"fun" signatures. Runs in two stages: first it
// expects the opening "{" (returned as a bracket), then it styles the
// procedure/function name (capitalised, back-quoted, or the anonymous
// "$") before returning control to tokenBase.
function tokenFunProc(stream, state) {
  if (stream.eatSpace()) {
    return null;
  }

  if (!state.hasPassedFirstStage && stream.eat("{")) {
    state.hasPassedFirstStage = true;
    return "bracket";
  } else if (state.hasPassedFirstStage) {
    stream.match(/([A-Z][A-Za-z0-9_]*)|(`.+`)|\$/);
    state.hasPassedFirstStage = false;
    state.tokenize = tokenBase;
    return "def";
  } else {
    // No "{" after proc/fun (e.g. a syntax error): give up on the
    // signature and fall back to the base tokenizer.
    state.tokenize = tokenBase;
    return null;
  }
}
|
|
|
|
// Consume the inside of a "/* ... */" block comment. Keeps returning
// "comment" across lines until the closing "*/" is found.
function tokenComment(stream, state) {
  var previousWasStar = false;
  var ch;
  while ((ch = stream.next())) {
    if (previousWasStar && ch == "/") {
      state.tokenize = tokenBase;
      break;
    }
    previousWasStar = (ch == "*");
  }
  return "comment";
}
|
|
|
|
// Return a tokenizer that consumes a string delimited by `quote`,
// honouring backslash escapes. The returned tokenizer stays installed
// across lines until the string is terminated.
function tokenString(quote) {
  return function (stream, state) {
    var escaped = false;
    var closed = false;
    var ch;
    while ((ch = stream.next()) != null) {
      if (ch == quote && !escaped) {
        closed = true;
        break;
      }
      escaped = !escaped && ch == "\\";
    }
    // Leave string mode when the string closed, or when the line ended
    // on anything other than a trailing escape (which continues it).
    if (closed || !escaped)
      state.tokenize = tokenBase;
    return "string";
  };
}
|
|
|
|
// Reindentation should occur on "[" or "]", or when any block-closing
// keyword is typed at the end of a line.
function buildElectricInputRegEx() {
  var closings = middle.concat(end).join("|");
  return new RegExp("[\\[\\]]|(" + closings + ")$");
}
|
|
|
|
return {
|
|
|
|
startState: function () {
|
|
return {
|
|
tokenize: tokenBase,
|
|
currentIndent: 0,
|
|
doInCurrentLine: false,
|
|
hasPassedFirstStage: false
|
|
};
|
|
},
|
|
|
|
token: function (stream, state) {
|
|
if (stream.sol())
|
|
state.doInCurrentLine = 0;
|
|
|
|
return state.tokenize(stream, state);
|
|
},
|
|
|
|
indent: function (state, textAfter) {
|
|
var trueText = textAfter.replace(/^\s+|\s+$/g, '');
|
|
|
|
if (trueText.match(endKeywords) || trueText.match(middleKeywords) || trueText.match(/(\[])/))
|
|
return conf.indentUnit * (state.currentIndent - 1);
|
|
|
|
if (state.currentIndent < 0)
|
|
return 0;
|
|
|
|
return state.currentIndent * conf.indentUnit;
|
|
},
|
|
fold: "indent",
|
|
electricInput: buildElectricInputRegEx(),
|
|
lineComment: "%",
|
|
blockCommentStart: "/*",
|
|
blockCommentEnd: "*/"
|
|
};
|
|
});

// Register the conventional MIME type for Oz sources with this mode.
CodeMirror.defineMIME("text/x-oz", "oz");

});
|