#!/usr/bin/env python

version = "1.6b"
version_info = (1,6,2,"rc-2")
__revision__ = "$Rev$"

"""
Python-Markdown
===============

Converts Markdown to HTML.  Basic usage as a module:

    import markdown
    md = Markdown()
    html = md.convert(your_text_string)

See http://www.freewisdom.org/projects/python-markdown/ for more
information and instructions on how to extend the functionality of the
script.  (You might want to read that before you try modifying this
file.)

Started by [Manfred Stienstra](http://www.dwerg.net/).  Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org).

Contact: yuri [at] freewisdom.org

License: GPL 2 (http://www.gnu.org/copyleft/gpl.html) or BSD

"""

import re, sys, os, random, codecs

# Set debug level: 3 none, 2 critical, 1 informative, 0 all
(VERBOSE, INFO, CRITICAL, NONE) = range(4)

MESSAGE_THRESHOLD = CRITICAL

def message(level, text) :
    if level >= MESSAGE_THRESHOLD :
        print text


# --------------- CONSTANTS YOU MIGHT WANT TO MODIFY -----------------

TAB_LENGTH = 4            # expand tabs to this many spaces
ENABLE_ATTRIBUTES = True  # @id = xyz -> <... id="xyz">
SMART_EMPHASIS = 1        # this_or_that does not become this<em>or</em>that
HTML_REMOVED_TEXT = "[HTML_REMOVED]" # text used instead of HTML in safe mode

RTL_BIDI_RANGES = ( (u'\u0590', u'\u07FF'),
                    # from Hebrew to Nko (includes Arabic, Syriac and Thaana)
                    (u'\u2D30', u'\u2D7F'),
                    # Tifinagh
                    )

# Unicode Reference Table:
# 0590-05FF - Hebrew
# 0600-06FF - Arabic
# 0700-074F - Syriac
# 0750-077F - Arabic Supplement
# 0780-07BF - Thaana
# 07C0-07FF - Nko

BOMS = { 'utf-8' : (unicode(codecs.BOM_UTF8, "utf-8"), ),
         'utf-16' : (unicode(codecs.BOM_UTF16_LE, "utf-16"),
                     unicode(codecs.BOM_UTF16_BE, "utf-16")),
         #'utf-32' : (unicode(codecs.BOM_UTF32_LE, "utf-32"),
         #            unicode(codecs.BOM_UTF32_BE, "utf-32")),
         }

def removeBOM(text, encoding):
    for bom in BOMS[encoding]:
        if text.startswith(bom):
            return text[len(bom):]   # drop just the BOM prefix
    return text


# The following constant specifies the name used in the usage
# statement displayed for python versions lower than 2.3.  (With
# python2.3 and higher the usage statement is generated by optparse
# and uses the actual name of the executable called.)

EXECUTABLE_NAME_FOR_USAGE = "python markdown.py"

# --------------- CONSTANTS YOU _SHOULD NOT_ HAVE TO CHANGE ----------

# a template for html placeholders
HTML_PLACEHOLDER_PREFIX = "qaodmasdkwaspemas"
HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%dajkqlsmdqpakldnzsdfls"

BLOCK_LEVEL_ELEMENTS = ['p', 'div', 'blockquote', 'pre', 'table',
                        'dl', 'ol', 'ul', 'script', 'noscript',
                        'form', 'fieldset', 'iframe', 'math', 'ins',
                        'del', 'hr', 'hr/', 'style']

def is_block_level (tag) :
    return ( (tag in BLOCK_LEVEL_ELEMENTS) or
             (tag[0] == 'h' and tag[1] in "0123456789") )

"""
======================================================================
========================== NANODOM ===================================
======================================================================

The classes below implement some of the most basic DOM methods.  I use
this instead of minidom because I need simpler functionality and do
not want to require additional libraries.

Importantly, NanoDom does not do normalization, which is what we
want.
It also adds extra white space when converting DOM to string.
"""

ENTITY_NORMALIZATION_EXPRESSIONS = [ (re.compile("&"), "&amp;"),
                                     (re.compile("<"), "&lt;"),
                                     (re.compile(">"), "&gt;"),
                                     (re.compile("\""), "&quot;")]

ENTITY_NORMALIZATION_EXPRESSIONS_SOFT = [ (re.compile("&(?!\#)"), "&amp;"),
                                          (re.compile("<"), "&lt;"),
                                          (re.compile(">"), "&gt;"),
                                          (re.compile("\""), "&quot;")]


def getBidiType(text) :

    if not text : return None

    ch = text[0]

    if not isinstance(ch, unicode) or not ch.isalpha():
        return None
    else :
        for min, max in RTL_BIDI_RANGES :
            if ( ch >= min and ch <= max ) :
                return "rtl"
        else :
            return "ltr"


class Document :

    def __init__ (self) :
        self.bidi = "ltr"

    def appendChild(self, child) :
        self.documentElement = child
        child.isDocumentElement = True
        child.parent = self
        self.entities = {}

    def setBidi(self, bidi) :
        if bidi :
            self.bidi = bidi

    def createElement(self, tag, textNode=None) :
        el = Element(tag)
        el.doc = self
        if textNode :
            el.appendChild(self.createTextNode(textNode))
        return el

    def createTextNode(self, text) :
        node = TextNode(text)
        node.doc = self
        return node

    def createEntityReference(self, entity):
        if entity not in self.entities:
            self.entities[entity] = EntityReference(entity)
        return self.entities[entity]

    def createCDATA(self, text) :
        node = CDATA(text)
        node.doc = self
        return node

    def toxml (self) :
        return self.documentElement.toxml()

    def normalizeEntities(self, text, avoidDoubleNormalizing=False) :

        if avoidDoubleNormalizing :
            regexps = ENTITY_NORMALIZATION_EXPRESSIONS_SOFT
        else :
            regexps = ENTITY_NORMALIZATION_EXPRESSIONS

        for regexp, substitution in regexps :
            text = regexp.sub(substitution, text)
        return text

    def find(self, test) :
        return self.documentElement.find(test)

    def unlink(self) :
        self.documentElement.unlink()
        self.documentElement = None


class CDATA :

    type = "cdata"

    def __init__ (self, text) :
        self.text = text

    def handleAttributes(self) :
        pass

    def toxml (self) :
        return "<![CDATA[" + self.text + "]]>"


class Element :

    type = "element"

    def __init__ (self, tag) :
        self.nodeName = tag
        self.attributes = []
        self.attribute_values = {}
        self.childNodes = []
        self.bidi = None
        self.isDocumentElement = False

    def setBidi(self, bidi) :

        if bidi :

            orig_bidi = self.bidi

            if not self.bidi or self.isDocumentElement:
                # Once the bidi is set don't change it (except for doc element)
                self.bidi = bidi
                self.parent.setBidi(bidi)

    def unlink(self) :
        for child in self.childNodes :
            if child.type == "element" :
                child.unlink()
        self.childNodes = None

    def setAttribute(self, attr, value) :
        if not attr in self.attributes :
            self.attributes.append(attr)

        self.attribute_values[attr] = value

    def insertChild(self, position, child) :
        self.childNodes.insert(position, child)
        child.parent = self

    def removeChild(self, child) :
        self.childNodes.remove(child)

    def replaceChild(self, oldChild, newChild) :
        position = self.childNodes.index(oldChild)
        self.removeChild(oldChild)
        self.insertChild(position, newChild)

    def appendChild(self, child) :
        self.childNodes.append(child)
        child.parent = self

    def handleAttributes(self) :
        pass

    def find(self, test, depth=0) :
        """ Returns a list of descendants that pass the test function """
        matched_nodes = []
        for child in self.childNodes :
            if test(child) :
                matched_nodes.append(child)
            if child.type == "element" :
                matched_nodes += child.find(test, depth+1)
        return matched_nodes

    def toxml(self):
        if ENABLE_ATTRIBUTES :
            for child in self.childNodes:
                child.handleAttributes()

        buffer = ""
        if self.nodeName in ['h1', 'h2', 'h3', 'h4'] :
            buffer += "\n"
        elif self.nodeName in ['li'] :
            buffer += "\n "

        # Process children FIRST, then do the attributes

        childBuffer = ""
        if self.childNodes or self.nodeName in ['blockquote']:
            childBuffer += ">"
            for child in self.childNodes :
                childBuffer += child.toxml()
            if self.nodeName == 'p' :
                childBuffer += "\n"
            elif self.nodeName == 'li' :
                childBuffer += "\n "
            childBuffer += "</%s>" % self.nodeName
        else :
            childBuffer += "/>"

        buffer += "<" + self.nodeName

        if self.nodeName in ['p', 'li', 'ul', 'ol',
                             'h1', 'h2', 'h3', 'h4', 'h5', 'h6'] :

            if not self.attribute_values.has_key("dir"):
                if self.bidi :
                    bidi = self.bidi
                else :
                    bidi = self.doc.bidi

                if bidi=="rtl" :
                    self.setAttribute("dir", "rtl")

        for attr in self.attributes :
            value = self.attribute_values[attr]
            value = self.doc.normalizeEntities(value,
                                               avoidDoubleNormalizing=True)
            buffer += ' %s="%s"' % (attr, value)

        # Now let's actually append the children

        buffer += childBuffer

        if self.nodeName in ['p', 'li', 'ul', 'ol', 'h1', 'h2', 'h3', 'h4'] :
            buffer += "\n"

        return buffer


class TextNode :

    type = "text"
    attrRegExp = re.compile(r'\{@([^\}]*)=([^\}]*)}') # {@id=123}

    def __init__ (self, text) :
        self.value = text

    def attributeCallback(self, match) :
        self.parent.setAttribute(match.group(1), match.group(2))

    def handleAttributes(self) :
        self.value = self.attrRegExp.sub(self.attributeCallback, self.value)

    def toxml(self) :

        text = self.value

        self.parent.setBidi(getBidiType(text))

        if not text.startswith(HTML_PLACEHOLDER_PREFIX):
            if self.parent.nodeName == "p" :
                text = text.replace("\n", "\n ")
            elif (self.parent.nodeName == "li"
                  and self.parent.childNodes[0]==self):
                text = "\n " + text.replace("\n", "\n ")

        text = self.doc.normalizeEntities(text)
        return text


class EntityReference:

    type = "entity_ref"

    def __init__(self, entity):
        self.entity = entity

    def handleAttributes(self):
        pass

    def toxml(self):
        return "&" + self.entity + ";"


"""
======================================================================
========================== PRE-PROCESSORS ============================
======================================================================

Preprocessors munge source text before we start doing anything too
complicated.

Each preprocessor implements a "run" method that takes a pointer to a
list of lines of the document, modifies it as necessary and returns
either the same pointer or a pointer to a new list.

Preprocessors must extend markdown.Preprocessor.
"""


class Preprocessor :
    pass


class HeaderPreprocessor (Preprocessor):

    """
       Replaces underlined headers with hashed headers to avoid
       the need for lookahead later.
    """

    def run (self, lines) :

        i = -1
        while i+1 < len(lines) :
            i = i+1
            if not lines[i].strip() :
                continue

            if lines[i].startswith("#") :
                lines.insert(i+1, "\n")

            if (i+1 < len(lines)
                  and lines[i+1]
                  and lines[i+1][0] in ['-', '=']) :

                underline = lines[i+1].strip()

                if underline == "="*len(underline) :
                    lines[i] = "# " + lines[i].strip()
                    lines[i+1] = ""
                elif underline == "-"*len(underline) :
                    lines[i] = "## " + lines[i].strip()
                    lines[i+1] = ""

        return lines

HEADER_PREPROCESSOR = HeaderPreprocessor()


class LinePreprocessor (Preprocessor):
    """Deals with HR lines (needs to be done before processing lists)"""

    def run (self, lines) :
        for i in range(len(lines)) :
            if self._isLine(lines[i]) :
                lines[i] = "<HR />"
                # around it - append the paragraph bits directly
                # onto parent_elem
                el = parent_elem
            else :
                # Otherwise make a "p" element
                el = self.doc.createElement("p")
                parent_elem.appendChild(el)

            for item in list :
                el.appendChild(item)

        if theRest :
            theRest = theRest[1:]   # skip the first (blank) line
            self._processSection(parent_elem, theRest, inList)


    def _processUList(self, parent_elem, lines, inList) :
        self._processList(parent_elem, lines, inList,
                          listexpr='ul', tag='ul')

    def _processOList(self, parent_elem, lines, inList) :
        self._processList(parent_elem, lines, inList,
                          listexpr='ol', tag='ol')

    def _processList(self, parent_elem, lines, inList, listexpr, tag) :
        """Given a list of document lines starting with a list item,
           finds the end of the list, breaks it up, and recursively
           processes each list item and the remainder of the text file.

           @param parent_elem: A dom element to which the content will be added
           @param lines: a list of lines
           @param inList: a level
           @returns: None"""

        ul = self.doc.createElement(tag)  # ul might actually be '<ol>'


    def _processCodeBlock(self, parent_elem, lines, inList) :
        """Given a list of document lines starting with a code block,
           finds the end of the block, puts it into the dom verbatim
           wrapped in ("<pre><code>") and recursively processes the
           remainder of the text file.

           @param parent_elem: DOM element to which the content will be added
           @param lines: a list of lines
           @param inList: a level
           @returns: None"""

        detabbed, theRest = self.blockGuru.detectTabbed(lines)

        pre = self.doc.createElement('pre')
        code = self.doc.createElement('code')
        parent_elem.appendChild(pre)
        pre.appendChild(code)

        text = "\n".join(detabbed).rstrip()+"\n"
        #text = text.replace("&amp;", "&")
        code.appendChild(self.doc.createTextNode(text))
        self._processSection(parent_elem, theRest, inList)
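    # Illustrative sketch (not part of the library): when a section starts
    # with tab-indented lines such as
    #
    #     ["    print 'hi'", "    print 'bye'", "", "Back to prose."]
    #
    # blockGuru.detectTabbed() pulls off the indented lines, they are joined
    # and stored verbatim as the text of a <pre><code> element, and the
    # remaining lines ("Back to prose." here) are fed back into
    # _processSection().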
    def _handleInlineWrapper (self, line) :
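        """Apply each of self.inlinePatterns to `line` in turn,
           progressively splitting it into a list of NanoDom nodes and
           plain strings; any strings left over at the end are wrapped
           in text nodes before the list is returned."""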
        parts = [line]

        for pattern in self.inlinePatterns :

            i = 0

            while i < len(parts) :

                x = parts[i]

                if isinstance(x, (str, unicode)) :

                    result = self._applyPattern(x, pattern)

                    if result :
                        i -= 1
                        parts.remove(x)
                        for y in result :
                            parts.insert(i+1,y)

                i += 1

        for i in range(len(parts)) :
            x = parts[i]
            if isinstance(x, (str, unicode)) :
                parts[i] = self.doc.createTextNode(x)

        return parts
    def _handleInline(self, line):
        """Transform a Markdown line with inline elements to an XHTML
           fragment.

           This function uses auxiliary objects called inline patterns.
           See notes on inline patterns above.

           @param line: A line of Markdown text
           @return: A list of NanoDom nodes """

        if not(line):
            return [self.doc.createTextNode(' ')]

        for pattern in self.inlinePatterns :
            list = self._applyPattern( line, pattern)
            if list: return list

        return [self.doc.createTextNode(line)]
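    # For example (illustrative only): a line that matches none of the inline
    # patterns comes back as a single text node, so _handleInline("plain text")
    # returns [TextNode("plain text")]; a line that does match is returned as
    # whatever _applyPattern() produced for it.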
    def _applyPattern(self, line, pattern) :
        """ Given a pattern, this function checks if the line
            fits the pattern, creates the necessary elements, and returns
            a list consisting of NanoDom elements and/or strings.

            @param line: the text to be processed
            @param pattern: the pattern to be checked
            @returns: a tuple of (text, NanoDom node, text) if the
                      pattern matches, None otherwise.
        """

        # match the line to pattern's pre-compiled reg exp.
        # if no match, move on.

        m = pattern.getCompiledRegExp().match(line)
        if not m :
            return None

        # if we got a match let the pattern make us a NanoDom node
        # if it doesn't, move on
        node = pattern.handleMatch(m, self.doc)

        # check if this node has any children that need processing

        if isinstance(node, Element):

            if not node.nodeName in ["code", "pre"] :
                for child in node.childNodes :
                    if isinstance(child, TextNode):

                        result = self._handleInlineWrapper(child.value)

                        if result:

                            if result == [child] :
                                continue

                            result.reverse()
                            #to make insertion easier

                            position = node.childNodes.index(child)

                            node.removeChild(child)

                            for item in result:
                                if isinstance(item, (str, unicode)):
                                    if len(item) > 0:
                                        node.insertChild(position,
                                                         self.doc.createTextNode(item))
                                else:
                                    node.insertChild(position, item)

        if node :
            # Those are in the reverse order!
            return ( m.groups()[-1], # the string to the left
                     node,           # the new node
                     m.group(1))     # the string to the right of the match
        else :
            return None
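    # A rough illustration (assumed pattern behaviour, not taken from this
    # file): if an emphasis pattern matched "foo *bar* baz", _applyPattern()
    # would return a triple holding the new <em> element plus the two
    # surrounding text fragments "foo " and " baz"; _handleInlineWrapper()
    # then splices those pieces back into its `parts` list in place of the
    # original string.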
    def convert (self, source = None):
        """Return the document in XHTML format.

           @returns: A serialized XHTML body."""

        #try :

        if source :
            self.source = source

        if not self.source :
            return ""

        self.source = removeBOM(self.source, self.encoding)

        doc = self._transform()
        xml = doc.toxml()

        #finally:
        #    doc.unlink()

        # Let's stick in all the raw html pieces

        for i in range(self.htmlStash.html_counter) :
            html = self.htmlStash.rawHtmlBlocks[i]
            if self.safeMode :
                html = HTML_REMOVED_TEXT

            xml = xml.replace("<p>%s\n</p>" % (HTML_PLACEHOLDER % i),
                              html + "\n")
            xml = xml.replace(HTML_PLACEHOLDER % i,
                              html)

        # And return everything but the top level tag

        if self.stripTopLevelTags :
            xml = xml.strip()[23:-7] + "\n"

        for pp in self.textPostprocessors :
            xml = pp.run(xml)

        return self.docType + xml
    __str__ = convert   # deprecated - will be changed in 1.7 to report
                        # information about the MD instance

    toString = __str__  # toString() method is deprecated


    def __unicode__(self):
        """Return the document in XHTML format as a Unicode object.
        """
        return str(self)#.decode(self.encoding)

    toUnicode = __unicode__  # deprecated - will be removed in 1.7
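# An illustrative use of the class above, mirroring the module docstring
# (kept as a comment so that importing this file has no side effects):
#
#     import markdown
#     md = markdown.Markdown(safe_mode=True)
#     html = md.convert("Some *Markdown* text")
#
# With safe_mode set, stashed raw HTML is replaced by HTML_REMOVED_TEXT in
# convert() above; without it the raw HTML is put back verbatim.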
# ====================================================================
def markdownFromFile(input = None,
                     output = None,
                     extensions = [],
                     encoding = None,
                     message_threshold = CRITICAL,
                     safe = False) :

    global MESSAGE_THRESHOLD
    MESSAGE_THRESHOLD = message_threshold

    message(VERBOSE, "input file: %s" % input)

    if not encoding :
        encoding = "utf-8"

    input_file = codecs.open(input, mode="r", encoding=encoding)
    text = input_file.read()
    input_file.close()

    new_text = markdown(text, extensions, encoding, safe_mode = safe)

    if output :
        output_file = codecs.open(output, "w", encoding=encoding)
        output_file.write(new_text)
        output_file.close()
    else :
        sys.stdout.write(new_text.encode(encoding))
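# Illustrative call (the file names are made up):
#
#     markdownFromFile(input="README.txt", output="README.html",
#                      encoding="utf-8", safe=False)
#
# When `output` is None the converted text is encoded and written to stdout
# instead of a file.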
def markdown(text,
             extensions = [],
             encoding = None,
             safe_mode = False) :

    message(VERBOSE, "in markdown.markdown(), received text:\n%s" % text)

    extension_names = []
    extension_configs = {}

    for ext in extensions :
        pos = ext.find("(")
        if pos == -1 :
            extension_names.append(ext)
        else :
            name = ext[:pos]
            extension_names.append(name)
            pairs = [x.split("=") for x in ext[pos+1:-1].split(",")]
            configs = [(x.strip(), y.strip()) for (x, y) in pairs]
            extension_configs[name] = configs

    md = Markdown(extensions=extension_names,
                  extension_configs=extension_configs,
                  safe_mode = safe_mode)

    return md.convert(text)
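# The strings in `extensions` follow a "name(key=value,key2=value2)"
# convention, as parsed by the loop above.  For example (the extension name
# here is only illustrative):
#
#     markdown(some_text, extensions=["footnotes(PLACE_MARKER=~~~)"])
#
# loads the "footnotes" extension and hands it the single config pair
# ("PLACE_MARKER", "~~~").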
class Extension :

    def __init__(self, configs = {}) :
        self.config = configs

    def getConfig(self, key) :
        if self.config.has_key(key) :
            return self.config[key][0]
        else :
            return ""

    def getConfigInfo(self) :
        return [(key, self.config[key][1]) for key in self.config.keys()]

    def setConfig(self, key, value) :
        self.config[key][0] = value
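# Sketch of the config layout Extension expects: each key maps to a two-item
# list of [current value, description], which is what getConfig(),
# getConfigInfo() and setConfig() above read and write.  The subclass and its
# key below are made-up examples:
#
#     class ExampleExtension (Extension) :
#         def __init__(self) :
#             Extension.__init__(self, configs={
#                 "PLACE_MARKER" : ["///Example///",
#                                   "where to place the generated block"]})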
OPTPARSE_WARNING = """
Python 2.3 or higher required for advanced command line options.
For lower versions of Python use:

    %s INPUT_FILE > OUTPUT_FILE

""" % EXECUTABLE_NAME_FOR_USAGE
def parse_options() :

    try :
        optparse = __import__("optparse")
    except ImportError :
        if len(sys.argv) == 2 :
            return {'input' : sys.argv[1],
                    'output' : None,
                    'message_threshold' : CRITICAL,
                    'safe' : False,
                    'extensions' : [],
                    'encoding' : None }
        else :
            print OPTPARSE_WARNING
            return None

    parser = optparse.OptionParser(usage="%prog INPUTFILE [options]")

    parser.add_option("-f", "--file", dest="filename",
                      help="write output to OUTPUT_FILE",
                      metavar="OUTPUT_FILE")
    parser.add_option("-e", "--encoding", dest="encoding",
                      help="encoding for input and output files",)
    parser.add_option("-q", "--quiet", default = CRITICAL,
                      action="store_const", const=NONE, dest="verbose",
                      help="suppress all messages")
    parser.add_option("-v", "--verbose",
                      action="store_const", const=INFO, dest="verbose",
                      help="print info messages")
    parser.add_option("-s", "--safe",
                      action="store_const", const=True, dest="safe",
                      help="safe mode (strip user's HTML tags)")
    parser.add_option("--noisy",
                      action="store_const", const=VERBOSE, dest="verbose",
                      help="print debug messages")
    parser.add_option("-x", "--extension", action="append", dest="extensions",
                      help = "load extension EXTENSION", metavar="EXTENSION")

    (options, args) = parser.parse_args()

    if not len(args) == 1 :
        parser.print_help()
        return None
    else :
        input_file = args[0]

    if not options.extensions :
        options.extensions = []

    return {'input' : input_file,
            'output' : options.filename,
            'message_threshold' : options.verbose,
            'safe' : options.safe,
            'extensions' : options.extensions,
            'encoding' : options.encoding }
if __name__ == '__main__':
    """ Run Markdown from the command line. """

    options = parse_options()

    #if os.access(inFile, os.R_OK):

    if not options :
        sys.exit(0)

    markdownFromFile(**options)
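# Illustrative command lines for the options defined above (file names are
# made up):
#
#     python markdown.py input.txt                  # write XHTML to stdout
#     python markdown.py input.txt -f out.html      # write to a file
#     python markdown.py input.txt -e utf-8 -s      # explicit encoding, safe mode
#     python markdown.py input.txt -x some_extension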