Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
expanded built-in markdown module with more standard features
- Loading branch information
1 parent
8cc6912
commit 7bff6e0
Showing
8 changed files
with
258 additions
and
6 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
File renamed without changes.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,127 @@ | ||
# Scan a run of '=' markers at the current position, emit each pair as a
# separate text token, and register the pairs in state.delimiters so the
# generic pairing pass can match openers with closers later.
#
def tokenize(state, silent) {
  var start = state.pos
  var marker = state.src[start]

  # pairing happens in post-processing; never match in validation mode
  if silent return false

  if marker != '=' return false

  var scanned = state.scan_delims(state.pos, true)
  var count = scanned.length

  if count < 2 return false

  var token

  # an odd-length run leaves one lone '=' that is emitted as plain text
  if count % 2 > 0 {
    token = state.push('text', '', 0)
    token.content = marker
    count--
  }

  iter var i = 0; i < count; i += 2 {
    token = state.push('text', '', 0)
    token.content = marker + marker

    if !scanned.can_open and !scanned.can_close continue

    state.delimiters.append({
      marker: marker,
      length: 0,   # disable "rule of 3" length checks meant for emphasis
      jump: i / 2, # 1 delimiter = 2 characters
      token: state.tokens.length() - 1,
      end: -1,
      open: scanned.can_open,
      close: scanned.can_close
    })
  }

  state.pos += scanned.length

  return true
}
|
||
|
||
# Walk through the delimiter list and rewrite the paired '=' text tokens
# into mark_open/mark_close tag tokens.
#
# @param state      inline parser state (owns state.tokens)
# @param delimiters list of delimiter records produced by tokenize()
#
def _post_process(state, delimiters) {
  var i, j,
      start_delim,
      end_delim,
      token,
      lone_markers = [],
      max = delimiters.length()

  iter i = 0; i < max; i++ {
    start_delim = delimiters[i]

    # skip delimiters registered by other inline rules
    if start_delim.marker != '=' {
      continue
    }

    # skip openers that never found a matching closer
    if start_delim.end == -1 {
      continue
    }

    end_delim = delimiters[start_delim.end]

    token = state.tokens[start_delim.token]
    token.type = 'mark_open'
    token.tag = 'mark'
    token.nesting = 1
    token.markup = '=='
    token.content = ''

    token = state.tokens[end_delim.token]
    token.type = 'mark_close'
    token.tag = 'mark'
    token.nesting = -1
    token.markup = '=='
    token.content = ''

    # remember lone '=' text tokens sitting just before a closer so they
    # can be moved outside the closed <mark> below
    if (state.tokens[end_delim.token - 1].type == 'text' and
        state.tokens[end_delim.token - 1].content == '=') {

      lone_markers.append(end_delim.token - 1)
    }
  }

  # If a marker sequence has an odd number of characters, it's split
  # like this: `=====` -> `=` + `==` + `==`, leaving one marker at the
  # start of the sequence.
  #
  # So, we have to move all those markers after subsequent mark_close tags.
  #
  while lone_markers.length() > 0 {
    i = lone_markers.pop()
    j = i + 1

    # fix: the original wrote `state.tokens.length` without calling it,
    # comparing j against the bound method instead of the token count
    # (every other list-length use in this file is `.length()`)
    while j < state.tokens.length() and state.tokens[j].type == 'mark_close' {
      j++
    }

    j--

    # swap the lone marker past the run of closers it precedes
    if i != j {
      token = state.tokens[j]
      state.tokens[j] = state.tokens[i]
      state.tokens[i] = token
    }
  }
}
|
||
# Entry point for the pairing pass: rewrites '=' delimiters in the
# top-level delimiter list, then in every nested (linkified/meta)
# delimiter list attached to the tokens.
#
def post_process(state) {
  _post_process(state, state.delimiters)

  var meta = state.tokens_meta
  var total = (state.tokens_meta or []).length()

  iter var idx = 0; idx < total; idx++ {
    if meta[idx] and meta[idx].delimiters {
      _post_process(state, meta[idx].delimiters)
    }
  }
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,57 @@ | ||
import ..common.utils { UNESCAPE_RE, UNESCAPE_SPACE_RE } | ||
|
||
|
||
# Parse a `~subscript~` span at the current position into
# sub_open / text / sub_close tokens.
#
def subscript(state, silent) {
  var max = state.pos_max
  var start = state.pos

  if state.src[start] != '~' return false
  if silent return false # don't run any pairs in validation mode
  if start + 2 >= max return false

  state.pos = start + 1
  var found = false

  # advance token-by-token until the closing '~' (or the end of input)
  while state.pos < max {
    if state.src[state.pos] == '~' {
      found = true
      break
    }
    state.md.inline.skip_token(state)
  }

  # no closer, or an empty `~~` span: not a subscript
  if !found or state.pos == start + 1 {
    state.pos = start
    return false
  }

  var content = state.src[start + 1, state.pos]

  # don't allow unescaped spaces/newlines inside
  if content.match(UNESCAPE_SPACE_RE) {
    state.pos = start
    return false
  }

  # found! emit the three tokens for the span
  state.pos_max = state.pos
  state.pos = start + 1

  var token = state.push('sub_open', 'sub', 1)
  token.markup = '~'

  token = state.push('text', '', 0)
  token.content = content.replace(UNESCAPE_RE, '$1')

  token = state.push('sub_close', 'sub', -1)
  token.markup = '~'

  # step past the closing marker and restore the outer bound
  state.pos = state.pos_max + 1
  state.pos_max = max
  return true
}
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,56 @@ | ||
import ..common.utils { UNESCAPE_RE, UNESCAPE_SPACE_RE } | ||
|
||
# Parse a `^superscript^` span at the current position into
# sup_open / text / sup_close tokens.
#
def superscript(state, silent) {
  var max = state.pos_max
  var start = state.pos

  if state.src[start] != '^' return false
  if silent return false # don't run any pairs in validation mode
  if start + 2 >= max return false

  state.pos = start + 1
  var found = false

  # advance token-by-token until the closing '^' (or the end of input)
  while state.pos < max {
    if state.src[state.pos] == '^' {
      found = true
      break
    }
    state.md.inline.skip_token(state)
  }

  # no closer, or an empty `^^` span: not a superscript
  if !found or state.pos == start + 1 {
    state.pos = start
    return false
  }

  var content = state.src[start + 1, state.pos]

  # don't allow unescaped spaces/newlines inside
  if content.match(UNESCAPE_SPACE_RE) {
    state.pos = start
    return false
  }

  # found! emit the three tokens for the span
  state.pos_max = state.pos
  state.pos = start + 1

  var token = state.push('sup_open', 'sup', 1)
  token.markup = '^'

  token = state.push('text', '', 0)
  token.content = content.replace(UNESCAPE_RE, '$1')

  token = state.push('sup_close', 'sup', -1)
  token.markup = '^'

  # step past the closing marker and restore the outer bound
  state.pos = state.pos_max + 1
  state.pos_max = max
  return true
}
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters