#!/usr/bin/env python

version = "1.6b"
version_info = (1,6,2,"rc-2")
__revision__ = "$Rev$"

"""
Python-Markdown
===============

Converts Markdown to HTML.  Basic usage as a module:

    import markdown
    md = Markdown()
    html = md.convert(your_text_string)

See http://www.freewisdom.org/projects/python-markdown/ for more
information and instructions on how to extend the functionality of the
script.  (You might want to read that before you try modifying this
file.)

Started by [Manfred Stienstra](http://www.dwerg.net/).  Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org).

Contact: yuri [at] freewisdom.org

License: GPL 2 (http://www.gnu.org/copyleft/gpl.html) or BSD

"""

import re, sys, os, random, codecs

# Set debug level: 3 none, 2 critical, 1 informative, 0 all
(VERBOSE, INFO, CRITICAL, NONE) = range(4)

MESSAGE_THRESHOLD = CRITICAL

def message(level, text) :
    if level >= MESSAGE_THRESHOLD :
        print text


# --------------- CONSTANTS YOU MIGHT WANT TO MODIFY -----------------

TAB_LENGTH = 4            # expand tabs to this many spaces
ENABLE_ATTRIBUTES = True  # @id = xyz -> <... id="xyz">
SMART_EMPHASIS = 1        # this_or_that does not become this<i>or</i>that
HTML_REMOVED_TEXT = "[HTML_REMOVED]" # text used instead of HTML in safe mode

RTL_BIDI_RANGES = ( (u'\u0590', u'\u07FF'),
                    # from Hebrew to Nko (includes Arabic, Syriac and Thaana)
                    (u'\u2D30', u'\u2D7F'),
                    # Tifinagh
                  )

# Unicode Reference Table:
# 0590-05FF - Hebrew
# 0600-06FF - Arabic
# 0700-074F - Syriac
# 0750-077F - Arabic Supplement
# 0780-07BF - Thaana
# 07C0-07FF - Nko

BOMS = { 'utf-8' : (codecs.BOM_UTF8, ),
         'utf-16' : (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE),
         #'utf-32' : (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)
       }

def removeBOM(text, encoding):
    convert = isinstance(text, unicode)
    for bom in BOMS[encoding]:
        bom = convert and bom.decode(encoding) or bom
        if text.startswith(bom):
            return text.lstrip(bom)
    return text

# The following constant specifies the name used in the usage
# statement displayed for python versions lower than 2.3.  (With
# python2.3 and higher the usage statement is generated by optparse
# and uses the actual name of the executable called.)

EXECUTABLE_NAME_FOR_USAGE = "python markdown.py"


# --------------- CONSTANTS YOU _SHOULD NOT_ HAVE TO CHANGE ----------

# a template for html placeholders
HTML_PLACEHOLDER_PREFIX = "qaodmasdkwaspemas"
HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%dajkqlsmdqpakldnzsdfls"

BLOCK_LEVEL_ELEMENTS = ['p', 'div', 'blockquote', 'pre', 'table',
                        'dl', 'ol', 'ul', 'script', 'noscript',
                        'form', 'fieldset', 'iframe', 'math', 'ins',
                        'del', 'hr', 'hr/', 'style']

def is_block_level (tag) :
    return ( (tag in BLOCK_LEVEL_ELEMENTS) or
             (tag[0] == 'h' and tag[1] in "0123456789") )

"""
======================================================================
========================== NANODOM ===================================
======================================================================

The three classes below implement some of the most basic DOM
methods.  I use this instead of minidom because I need a simpler
functionality and do not want to require additional libraries.

Importantly, NanoDom does not do normalization, which is what we
want.  It also adds extra white space when converting DOM to string.
"""
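# A minimal sketch of how the NanoDom classes defined below fit together
# (illustrative only; the real consumer of these classes is the Markdown
# class further down):
#
#     doc = Document()
#     para = doc.createElement("p", textNode="Hello world")
#     doc.appendChild(para)      # becomes the document element
#     xml = doc.toxml()          # roughly '<p>Hello world\n</p>\n'
#
# Serialization is driven by Element.toxml() and TextNode.toxml(), which
# also handle bidi direction and entity escaping on the way out.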
ENTITY_NORMALIZATION_EXPRESSIONS = [ (re.compile("&"), "&amp;"),
                                     (re.compile("<"), "&lt;"),
                                     (re.compile(">"), "&gt;"),
                                     (re.compile("\""), "&quot;")]

ENTITY_NORMALIZATION_EXPRESSIONS_SOFT = [ (re.compile("&(?!\#)"), "&amp;"),
                                     (re.compile("<"), "&lt;"),
                                     (re.compile(">"), "&gt;"),
                                     (re.compile("\""), "&quot;")]


def getBidiType(text) :

    if not text : return None

    ch = text[0]

    if not isinstance(ch, unicode) or not ch.isalpha():
        return None

    else :

        for min, max in RTL_BIDI_RANGES :
            if ( ch >= min and ch <= max ) :
                return "rtl"
        else :
            return "ltr"


class Document :

    def __init__ (self) :
        self.bidi = "ltr"

    def appendChild(self, child) :
        self.documentElement = child
        child.isDocumentElement = True
        child.parent = self
        self.entities = {}

    def setBidi(self, bidi) :
        if bidi :
            self.bidi = bidi

    def createElement(self, tag, textNode=None) :
        el = Element(tag)
        el.doc = self
        if textNode :
            el.appendChild(self.createTextNode(textNode))
        return el

    def createTextNode(self, text) :
        node = TextNode(text)
        node.doc = self
        return node

    def createEntityReference(self, entity):
        if entity not in self.entities:
            self.entities[entity] = EntityReference(entity)
        return self.entities[entity]

    def createCDATA(self, text) :
        node = CDATA(text)
        node.doc = self
        return node

    def toxml (self) :
        return self.documentElement.toxml()

    def normalizeEntities(self, text, avoidDoubleNormalizing=False) :

        if avoidDoubleNormalizing :
            regexps = ENTITY_NORMALIZATION_EXPRESSIONS_SOFT
        else :
            regexps = ENTITY_NORMALIZATION_EXPRESSIONS

        for regexp, substitution in regexps :
            text = regexp.sub(substitution, text)

        return text

    def find(self, test) :
        return self.documentElement.find(test)

    def unlink(self) :
        self.documentElement.unlink()
        self.documentElement = None
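# How Document.normalizeEntities behaves (illustrative sketch): the "soft"
# table is used when a value may already contain entity references, so
# numeric character references such as "&#8212;" are not escaped twice.
#
#     doc = Document()
#     doc.normalizeEntities('a < b & "c"')
#     #   -> 'a &lt; b &amp; &quot;c&quot;'
#     doc.normalizeEntities('&#8212; & <', avoidDoubleNormalizing=True)
#     #   -> '&#8212; &amp; &lt;'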
class CDATA :

    type = "cdata"

    def __init__ (self, text) :
        self.text = text

    def handleAttributes(self) :
        pass

    def toxml (self) :
        return "<![CDATA[" + self.text + "]]>"


class Element :

    type = "element"

    def __init__ (self, tag) :

        self.nodeName = tag
        self.attributes = []
        self.attribute_values = {}
        self.childNodes = []
        self.bidi = None
        self.isDocumentElement = False

    def setBidi(self, bidi) :

        if bidi :

            orig_bidi = self.bidi

            if not self.bidi or self.isDocumentElement:
                # Once the bidi is set don't change it (except for doc element)
                self.bidi = bidi
                self.parent.setBidi(bidi)

    def unlink(self) :
        for child in self.childNodes :
            if child.type == "element" :
                child.unlink()
        self.childNodes = None

    def setAttribute(self, attr, value) :
        if not attr in self.attributes :
            self.attributes.append(attr)
        self.attribute_values[attr] = value

    def insertChild(self, position, child) :
        self.childNodes.insert(position, child)
        child.parent = self

    def removeChild(self, child) :
        self.childNodes.remove(child)

    def replaceChild(self, oldChild, newChild) :
        position = self.childNodes.index(oldChild)
        self.removeChild(oldChild)
        self.insertChild(position, newChild)

    def appendChild(self, child) :
        self.childNodes.append(child)
        child.parent = self

    def handleAttributes(self) :
        pass

    def find(self, test, depth=0) :
        """ Returns a list of descendants that pass the test function """
        matched_nodes = []
        for child in self.childNodes :
            if test(child) :
                matched_nodes.append(child)
            if child.type == "element" :
                matched_nodes += child.find(test, depth+1)
        return matched_nodes

    def toxml(self):

        if ENABLE_ATTRIBUTES :
            for child in self.childNodes:
                child.handleAttributes()

        buffer = ""
        if self.nodeName in ['h1', 'h2', 'h3', 'h4'] :
            buffer += "\n"
        elif self.nodeName in ['li'] :
            buffer += "\n "

        # Process children FIRST, then do the attributes

        childBuffer = ""

        if self.childNodes or self.nodeName in ['blockquote']:
            childBuffer += ">"
            for child in self.childNodes :
                childBuffer += child.toxml()
            if self.nodeName == 'p' :
                childBuffer += "\n"
            elif self.nodeName == 'li' :
                childBuffer += "\n "
            childBuffer += "</%s>" % self.nodeName
        else :
            childBuffer += "/>"

        buffer += "<" + self.nodeName

        if self.nodeName in ['p', 'li', 'ul', 'ol',
                             'h1', 'h2', 'h3', 'h4', 'h5', 'h6'] :

            if not self.attribute_values.has_key("dir"):
                if self.bidi :
                    bidi = self.bidi
                else :
                    bidi = self.doc.bidi

                if bidi == "rtl" :
                    self.setAttribute("dir", "rtl")

        for attr in self.attributes :
            value = self.attribute_values[attr]
            value = self.doc.normalizeEntities(value,
                                               avoidDoubleNormalizing=True)
            buffer += ' %s="%s"' % (attr, value)

        # Now let's actually append the children

        buffer += childBuffer

        if self.nodeName in ['p', 'br ', 'li', 'ul', 'ol',
                             'h1', 'h2', 'h3', 'h4'] :
            buffer += "\n"

        return buffer


class TextNode :

    type = "text"
    attrRegExp = re.compile(r'\{@([^\}]*)=([^\}]*)}') # {@id=123}

    def __init__ (self, text) :
        self.value = text

    def attributeCallback(self, match) :
        self.parent.setAttribute(match.group(1), match.group(2))

    def handleAttributes(self) :
        self.value = self.attrRegExp.sub(self.attributeCallback, self.value)

    def toxml(self) :

        text = self.value

        self.parent.setBidi(getBidiType(text))

        if not text.startswith(HTML_PLACEHOLDER_PREFIX):
            if self.parent.nodeName == "p" :
                text = text.replace("\n", "\n ")
            elif (self.parent.nodeName == "li"
                  and self.parent.childNodes[0] == self):
                text = "\n " + text.replace("\n", "\n ")

        text = self.doc.normalizeEntities(text)

        return text


class EntityReference:

    type = "entity_ref"

    def __init__(self, entity):
        self.entity = entity

    def handleAttributes(self):
        pass

    def toxml(self):
        return "&" + self.entity + ";"


"""
======================================================================
========================== PRE-PROCESSORS ============================
======================================================================

Preprocessors munge source text before we start doing anything too
complicated.

Each preprocessor implements a "run" method that takes a pointer to a
list of lines of the document, modifies it as necessary and returns
either the same pointer or a pointer to a new list.

Preprocessors must extend markdown.Preprocessor.
"""

class Preprocessor :
    pass
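# Sketch of a custom preprocessor following the contract described above
# (hypothetical example, not part of this module): it receives the list of
# source lines, may edit it in place, and must return a list of lines.
#
#     class SignaturePreprocessor (Preprocessor):
#         def run (self, lines) :
#             # drop everything below an email-style signature marker
#             if "-- " in lines :
#                 lines = lines[:lines.index("-- ")]
#             return lines
#
# An extension would register an instance of this on a Markdown instance.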
class HeaderPreprocessor (Preprocessor):

    """
       Replaces underlined headers with hashed headers to avoid
       the need for lookahead later.
    """

    def run (self, lines) :

        i = -1
        while i+1 < len(lines) :
            i = i+1
            if not lines[i].strip() :
                continue

            if lines[i].startswith("#") :
                lines.insert(i+1, "\n")

            if (i+1 <= len(lines)
                  and lines[i+1]
                  and lines[i+1][0] in ['-', '=']) :

                underline = lines[i+1].strip()

                if underline == "="*len(underline) :
                    lines[i] = "# " + lines[i].strip()
                    lines[i+1] = ""
                elif underline == "-"*len(underline) :
                    lines[i] = "## " + lines[i].strip()
                    lines[i+1] = ""

        return lines

HEADER_PREPROCESSOR = HeaderPreprocessor()


class LinePreprocessor (Preprocessor):
    """Deals with HR lines (needs to be done before processing lists)"""

    blockquote_re = re.compile(r'^(> )+')

    def run (self, lines) :
        for i in range(len(lines)) :
            prefix = ''
            m = self.blockquote_re.search(lines[i])
            if m : prefix = m.group(0)
            if self._isLine(lines[i][len(prefix):]) :
                lines[i] = prefix + self.stash.store("<hr />", safe=True)
        return lines


class RawHtmlTextPostprocessor :

    def run(self, text):

        for i in range(self.stash.html_counter) :
            html, safe = self.stash.rawHtmlBlocks[i]
            if self.safeMode and not safe :
                html = HTML_REMOVED_TEXT
            text = text.replace("<p>%s\n</p>" % (HTML_PLACEHOLDER % i),
                                html + "\n")
            text = text.replace(HTML_PLACEHOLDER % i, html)
        return text

    def escape(self, html):
        ''' Basic html escaping '''
        html = html.replace('&', '&amp;')
        html = html.replace('<', '&lt;')
        html = html.replace('>', '&gt;')
        return html.replace('"', '&quot;')

RAWHTMLTEXTPOSTPROCESSOR = RawHtmlTextPostprocessor()


"""
======================================================================
========================== MISC AUXILIARY CLASSES ====================
======================================================================
"""

class HtmlStash :
    """This class is used for stashing HTML objects that we extract
       in the beginning and replace with place-holders."""

    def __init__ (self) :
        self.html_counter = 0 # for counting inline html segments
        self.rawHtmlBlocks = []

    def store(self, html, safe=False) :
        """Saves an HTML segment for later reinsertion.  Returns a
           placeholder string that needs to be inserted into the
           document.

           @param html: an html segment
           @param safe: label an html segment as safe for safemode
           @returns : a placeholder string """
        self.rawHtmlBlocks.append((html, safe))
        placeholder = HTML_PLACEHOLDER % self.html_counter
        self.html_counter += 1
        return placeholder
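# HtmlStash in one picture (illustrative sketch): raw HTML is swapped out
# for an opaque placeholder early in processing, and the placeholder is
# swapped back for the stored segment at the very end.
#
#     stash = HtmlStash()
#     ph = stash.store("<div>raw</div>", safe=True)
#     # ph == HTML_PLACEHOLDER % 0
#     #    == "qaodmasdkwaspemas0ajkqlsmdqpakldnzsdfls"
#     stash.rawHtmlBlocks[0]   # -> ("<div>raw</div>", True)
#
# Because the placeholder contains no Markdown syntax characters, it passes
# through the rest of the pipeline untouched.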
""" class CorePatterns : """This class is scheduled for removal as part of a refactoring effort.""" patterns = { 'header': r'(#*)([^#]*)(#*)', # # A title 'reference-def' : r'(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)', # [Google]: http://www.google.com/ 'containsline': r'([-]*)$|^([=]*)', # -----, =====, etc. 'ol': r'[ ]{0,3}[\d]*\.\s+(.*)', # 1. text 'ul': r'[ ]{0,3}[*+-]\s+(.*)', # "* text" 'isline1': r'(\**)', # *** 'isline2': r'(\-*)', # --- 'isline3': r'(\_*)', # ___ 'tabbed': r'((\t)|( ))(.*)', # an indented line 'quoted' : r'> ?(.*)', # a quoted block ("> ...") } def __init__ (self) : self.regExp = {} for key in self.patterns.keys() : self.regExp[key] = re.compile("^%s$" % self.patterns[key], re.DOTALL) self.regExp['containsline'] = re.compile(r'^([-]*)$|^([=]*)$', re.M) RE = CorePatterns() class Markdown: """ Markdown formatter class for creating an html document from Markdown text """ def __init__(self, source=None, # deprecated extensions=[], extension_configs=None, encoding="utf-8", safe_mode = False): """Creates a new Markdown instance. @param source: The text in Markdown format. @param encoding: The character encoding ofaround it - append the paragraph bits directly # onto parent_elem el = parent_elem else : # Otherwise make a "p" element el = self.doc.createElement("p") parent_elem.appendChild(el) for item in list : el.appendChild(item) def _processUList(self, parent_elem, lines, inList) : self._processList(parent_elem, lines, inList, listexpr='ul', tag = 'ul') def _processOList(self, parent_elem, lines, inList) : self._processList(parent_elem, lines, inList, listexpr='ol', tag = 'ol') def _processList(self, parent_elem, lines, inList, listexpr, tag) : """Given a list of document lines starting with a list item, finds the end of the list, breaks it up, and recursively processes each list item and the remainder of the text file. @param parent_elem: A dom element to which the content will be added @param lines: a list of lines @param inList: a level @returns: None""" ul = self.doc.createElement(tag) # ul might actually be '
") and recursively processes the
the remainder of the text file.
@param parent_elem: DOM element to which the content will be added
@param lines: a list of lines
@param inList: a level
@returns: None"""
detabbed, theRest = self.blockGuru.detectTabbed(lines)
pre = self.doc.createElement('pre')
code = self.doc.createElement('code')
parent_elem.appendChild(pre)
pre.appendChild(code)
text = "\n".join(detabbed).rstrip()+"\n"
        #text = text.replace("&", "&amp;")
code.appendChild(self.doc.createTextNode(text))
self._processSection(parent_elem, theRest, inList)
def _handleInlineWrapper (self, line, patternIndex=0) :
parts = [line]
while patternIndex < len(self.inlinePatterns) :
i = 0
while i < len(parts) :
x = parts[i]
if isinstance(x, (str, unicode)) :
result = self._applyPattern(x, \
self.inlinePatterns[patternIndex], \
patternIndex )
if result :
i -= 1
parts.remove(x)
for y in result :
parts.insert(i+1,y)
i += 1
patternIndex += 1
for i in range(len(parts)) :
x = parts[i]
if isinstance(x, (str, unicode)) :
parts[i] = self.doc.createTextNode(x)
return parts
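    # Rough picture of _handleInlineWrapper (illustrative): the line starts
    # as a one-element list of strings; each inline pattern may replace a
    # string part with up to three new parts (the text around the match plus
    # the new node), and the loop keeps going until every pattern has had a
    # chance at every remaining string part.  Whatever is still a plain
    # string at the end is wrapped in a TextNode, so e.g. "some `code` here"
    # comes back roughly as [TextNode, <code> Element, TextNode].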
def _handleInline(self, line):
"""Transform a Markdown line with inline elements to an XHTML
fragment.
This function uses auxiliary objects called inline patterns.
See notes on inline patterns above.
        @param line: A line of Markdown text
@return: A list of NanoDom nodes """
if not(line):
return [self.doc.createTextNode(' ')]
        for patternIndex, pattern in enumerate(self.inlinePatterns) :
            list = self._applyPattern(line, pattern, patternIndex)
if list: return list
return [self.doc.createTextNode(line)]
def _applyPattern(self, line, pattern, patternIndex) :
""" Given a pattern name, this function checks if the line
fits the pattern, creates the necessary elements, and returns
back a list consisting of NanoDom elements and/or strings.
@param line: the text to be processed
@param pattern: the pattern to be checked
@returns: the appropriate newly created NanoDom element if the
pattern matches, None otherwise.
"""
# match the line to pattern's pre-compiled reg exp.
# if no match, move on.
m = pattern.getCompiledRegExp().match(line)
if not m :
return None
# if we got a match let the pattern make us a NanoDom node
# if it doesn't, move on
node = pattern.handleMatch(m, self.doc)
        # check if this node has children that need processing
if isinstance(node, Element):
if not node.nodeName in ["code", "pre"] :
for child in node.childNodes :
if isinstance(child, TextNode):
result = self._handleInlineWrapper(child.value, patternIndex+1)
if result:
if result == [child] :
continue
result.reverse()
#to make insertion easier
position = node.childNodes.index(child)
node.removeChild(child)
for item in result:
if isinstance(item, (str, unicode)):
if len(item) > 0:
node.insertChild(position,
self.doc.createTextNode(item))
else:
node.insertChild(position, item)
if node :
# Those are in the reverse order!
            return ( m.groups()[-1], # the string to the right of the match
                     node,           # the new node
                     m.group(1))     # the string to the left of the match
else :
return None
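    # Note on the tuple returned above (see "Those are in the reverse
    # order!"): the three pieces come back in reverse document order, and
    # _handleInlineWrapper restores left-to-right order as a side effect of
    # inserting every piece at the same list index.  Illustrative shape:
    #
    #     ("text after the match", <new Element>, "text before the match")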
def convert (self, source = None):
"""Return the document in XHTML format.
@returns: A serialized XHTML body."""
#try :
if source is not None: #Allow blank string
self.source = source
if not self.source :
return ""
self.source = removeBOM(self.source, self.encoding)
for pp in self.textPreprocessors :
self.source = pp.run(self.source)
doc = self._transform()
xml = doc.toxml()
#finally:
# doc.unlink()
# Return everything but the top level tag
if self.stripTopLevelTags :
xml = xml.strip()[23:-7] + "\n"
for pp in self.textPostprocessors :
xml = pp.run(xml)
return (self.docType + xml).strip()
__str__ = convert # deprecated - will be changed in 1.7 to report
# information about the MD instance
toString = __str__ # toString() method is deprecated
def __unicode__(self):
"""Return the document in XHTML format as a Unicode object.
"""
return str(self)#.decode(self.encoding)
toUnicode = __unicode__ # deprecated - will be removed in 1.7
# ====================================================================
def markdownFromFile(input = None,
output = None,
extensions = [],
encoding = None,
message_threshold = CRITICAL,
safe = False) :
global MESSAGE_THRESHOLD
MESSAGE_THRESHOLD = message_threshold
message(VERBOSE, "input file: %s" % input)
if not encoding :
encoding = "utf-8"
input_file = codecs.open(input, mode="r", encoding=encoding)
text = input_file.read()
input_file.close()
new_text = markdown(text, extensions, encoding, safe_mode = safe)
if output :
output_file = codecs.open(output, "w", encoding=encoding)
output_file.write(new_text)
output_file.close()
else :
sys.stdout.write(new_text.encode(encoding))
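# Typical use of the file-level helper (illustrative; the file names are
# made up):
#
#     markdownFromFile(input="README.txt", output="README.html",
#                      encoding="utf-8")
#
# Omitting `output` writes the encoded result to stdout instead.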
def markdown(text,
extensions = [],
encoding = None,
safe_mode = False) :
message(VERBOSE, "in markdown.markdown(), received text:\n%s" % text)
extension_names = []
extension_configs = {}
for ext in extensions :
pos = ext.find("(")
if pos == -1 :
extension_names.append(ext)
else :
name = ext[:pos]
extension_names.append(name)
pairs = [x.split("=") for x in ext[pos+1:-1].split(",")]
configs = [(x.strip(), y.strip()) for (x, y) in pairs]
extension_configs[name] = configs
md = Markdown(extensions=extension_names,
extension_configs=extension_configs,
safe_mode = safe_mode)
return md.convert(text)
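# The strings in `extensions` may carry inline configuration, which the loop
# above splits into extension_configs.  Illustrative call (the extension
# names are examples, not ones shipped in this file):
#
#     html = markdown(text,
#                     extensions=["footnotes", "wikilink(base_url=/wiki/)"])
#     # extension_names   == ["footnotes", "wikilink"]
#     # extension_configs == {"wikilink": [("base_url", "/wiki/")]}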
class Extension :
def __init__(self, configs = {}) :
self.config = configs
def getConfig(self, key) :
if self.config.has_key(key) :
return self.config[key][0]
else :
return ""
def getConfigInfo(self) :
return [(key, self.config[key][1]) for key in self.config.keys()]
def setConfig(self, key, value) :
self.config[key][0] = value
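# Sketch of how an extension is expected to describe its options
# (hypothetical subclass; the two-element [value, description] lists are
# what getConfig and getConfigInfo assume):
#
#     class MyExtension (Extension):
#         def __init__(self, configs={}):
#             self.config = {"PLACE_MARKER": ["///", "where to put things"]}
#             for key, value in configs:
#                 self.setConfig(key, value)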
OPTPARSE_WARNING = """
Python 2.3 or higher required for advanced command line options.
For lower versions of Python use:
%s INPUT_FILE > OUTPUT_FILE
""" % EXECUTABLE_NAME_FOR_USAGE
def parse_options() :
try :
optparse = __import__("optparse")
except :
if len(sys.argv) == 2 :
return {'input' : sys.argv[1],
'output' : None,
'message_threshold' : CRITICAL,
'safe' : False,
'extensions' : [],
'encoding' : None }
else :
print OPTPARSE_WARNING
return None
parser = optparse.OptionParser(usage="%prog INPUTFILE [options]")
parser.add_option("-f", "--file", dest="filename",
help="write output to OUTPUT_FILE",
metavar="OUTPUT_FILE")
parser.add_option("-e", "--encoding", dest="encoding",
help="encoding for input and output files",)
parser.add_option("-q", "--quiet", default = CRITICAL,
action="store_const", const=NONE, dest="verbose",
help="suppress all messages")
parser.add_option("-v", "--verbose",
action="store_const", const=INFO, dest="verbose",
help="print info messages")
parser.add_option("-s", "--safe",
action="store_const", const=True, dest="safe",
help="same mode (strip user's HTML tag)")
parser.add_option("--noisy",
action="store_const", const=VERBOSE, dest="verbose",
help="print debug messages")
parser.add_option("-x", "--extension", action="append", dest="extensions",
help = "load extension EXTENSION", metavar="EXTENSION")
(options, args) = parser.parse_args()
if not len(args) == 1 :
parser.print_help()
return None
else :
input_file = args[0]
if not options.extensions :
options.extensions = []
return {'input' : input_file,
'output' : options.filename,
'message_threshold' : options.verbose,
'safe' : options.safe,
'extensions' : options.extensions,
'encoding' : options.encoding }
if __name__ == '__main__':
""" Run Markdown from the command line. """
options = parse_options()
#if os.access(inFile, os.R_OK):
if not options :
sys.exit(0)
markdownFromFile(**options)