author     Yuri Takhteyev <yuri@freewisdom.org>  2008-12-07 02:28:16 -0800
committer  Yuri Takhteyev <yuri@freewisdom.org>  2008-12-07 02:28:16 -0800
commit     33045f39f394d73d5e742d49d11e967946e34153 (patch)
tree       ceeac5f9323969d25c51cc68448823470a0e4e85 /test-markdown.py
parent     cd73ef768613ae2b3683f8e926f405562bc37e39 (diff)
Refactoring test and getting logging to work properly when etree fails to load.
Diffstat (limited to 'test-markdown.py')
-rwxr-xr-x  test-markdown.py  273
1 file changed, 112 insertions(+), 161 deletions(-)
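
The commit message mentions getting logging to work when etree fails to load; the first hunk below swaps the deferred import (markdown = None) for a direct import preceded by a manual logger setup. A minimal sketch of that idea, for reference only: the 'MARKDOWN' logger name and the StreamHandler call come straight from the patch, while the explicit WARNING level and the assumption about what markdown.py logs when no ElementTree implementation can be imported are not part of the commit.

    # Sketch only -- not part of the patch. Attaching a handler to the
    # 'MARKDOWN' logger before importing markdown means that whatever the
    # library logs while trying to load an ElementTree implementation is
    # written to stderr, instead of Python 2.3/2.4 printing
    # "No handlers could be found for logger MARKDOWN".
    import logging

    log = logging.getLogger('MARKDOWN')       # logger name used in the patch
    log.addHandler(logging.StreamHandler())   # send records to stderr
    log.setLevel(logging.WARNING)             # assumed level, not set by the patch

    import markdown                           # would log here if etree fails to load
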
diff --git a/test-markdown.py b/test-markdown.py
index d2548a0..e5a4374 100755
--- a/test-markdown.py
+++ b/test-markdown.py
@@ -3,8 +3,11 @@
import os, difflib, time, gc, codecs, platform, sys
from pprint import pprint
import textwrap
-
-markdown = None
+
+# Setup a logger manually for compatibility with Python 2.3
+import logging
+logging.getLogger('MARKDOWN').addHandler(logging.StreamHandler())
+import markdown
TEST_DIR = "tests"
TMP_DIR = "./tmp/"
@@ -64,49 +67,36 @@ def stacksize(since=0.0):
############################################################
DIFF_FILE_TEMPLATE = """
-
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
-
<style>
-
td {
padding-left: 10px;
padding-right: 10px;
}
-
colgroup {
margin: 10px;
}
-
.diff_header {
color: gray;
}
-
.ok {
color: green;
}
-
.gray {
color: gray;
}
-
.failed a {
color: red;
}
-
.failed {
color: red;
}
-
</style>
-
</head>
<body>
-
<h1>Results Summary</h1>
-
<table rules="groups" >
<colgroup></colgroup>
<colgroup></colgroup>
@@ -120,20 +110,14 @@ DIFF_FILE_TEMPLATE = """
<td>Memory</td>
</th>
<tbody>
-
-"""
+ """
FOOTER = """
-
</body>
</html>
-
-
"""
DIFF_TABLE_TEMPLATE = """
-
-
<table class="diff" rules="groups" >
<colgroup></colgroup>
<colgroup></colgroup>
@@ -141,7 +125,6 @@ DIFF_TABLE_TEMPLATE = """
<colgroup></colgroup>
<colgroup></colgroup>
<colgroup></colgroup>
-
<th>
<td></td>
<td>Expected</td>
@@ -155,234 +138,202 @@ DIFF_TABLE_TEMPLATE = """
</table>
"""
-def smart_split(text) :
+def smart_split(text) :
result = []
-
for x in text.splitlines() :
for y in textwrap.wrap(textwrap.dedent(x), 40):
result.append(y)
-
return result
+differ = difflib.Differ()
+try :
+ htmldiff = difflib.HtmlDiff()
+except:
+ htmldiff = None
-def testDirectory(dir, measure_time=False, safe_mode=False) :
+class TestRunner :
- encoding = "utf8"
+ def __init__ (self) :
+ self.failedTests = []
+ if not os.path.exists(TMP_DIR):
+ os.mkdir(TMP_DIR)
+ def test_directory(self, dir, measure_time=False, safe_mode=False, encoding = "utf8") :
+ self.encoding = encoding
+ benchmark_file_name = os.path.join(dir, "benchmark.dat")
+ self.saved_benchmarks = {}
- benchmark_file_name = os.path.join(dir, "benchmark.dat")
+ if measure_time :
+ if os.path.exists(benchmark_file_name) :
+ file = open(benchmark_file_name)
+ for line in file.readlines() :
+ test, str_time, str_mem = line.strip().split(":")
+ self.saved_benchmarks[test] = (float(str_time), float(str_mem))
+ repeat = range(10)
+ else :
+ repeat = (0,)
- saved_benchmarks = {}
+ # First, determine from the name of the directory if any extensions
+ # need to be loaded.
- if measure_time :
+ parts = os.path.split(dir)[-1].split("-x-")
+ if len(parts) > 1 :
+ extensions = parts[1].split("-")
+ print extensions
+ else :
+ extensions = []
- if os.path.exists(benchmark_file_name) :
+ mem = memory()
+ start = time.clock()
+ self.md = markdown.Markdown(extensions=extensions, safe_mode = safe_mode)
+ construction_time = time.clock() - start
+ construction_mem = memory(mem)
- file = open(benchmark_file_name)
- for line in file.readlines() :
- test, str_time, str_mem = line.strip().split(":")
- saved_benchmarks[test] = (float(str_time), float(str_mem))
-
- if measure_time :
- repeat = range(10)
- else :
- repeat = (0,)
+ self.benchmark_buffer = "construction:%f:%f\n" % (construction_time,
+ construction_mem)
- # First, determine from the name of the directory if any extensions
- # need to be loaded.
+ html_diff_file_path = os.path.join(TMP_DIR, os.path.split(dir)[-1]) + ".html"
+ self.html_diff_file = codecs.open(html_diff_file_path, "w", encoding=encoding)
+ self.html_diff_file.write(DIFF_FILE_TEMPLATE)
- parts = os.path.split(dir)[-1].split("-x-")
- if len(parts) > 1 :
- extensions = parts[1].split("-")
- print extensions
- else :
- extensions = []
+ self.diffs_buffer = ""
- mem = memory()
- t = time.clock()
- md = markdown.Markdown(extensions=extensions, safe_mode = safe_mode)
- construction_time = time.clock() - t
- construction_mem = memory(mem)
+ tests = [x.replace(".txt", "")
+ for x in os.listdir(dir) if x.endswith(".txt")]
+ tests.sort()
+ for test in tests :
+ self.run_test(dir, test, repeat)
- benchmark_buffer = "construction:%f:%f\n" % (construction_time,
- construction_mem)
+ self.html_diff_file.write("</table>")
- tests = [x.replace(".txt", "")
- for x in os.listdir(dir) if x.endswith(".txt")]
- tests.sort()
+ if sys.version < "3.0":
+ self.html_diff_file.write(self.diffs_buffer.decode("utf8"))
- d = difflib.Differ()
- try :
- hd = difflib.HtmlDiff()
- except:
- hd = None
+ self.html_diff_file.write(FOOTER)
+ self.html_diff_file.close()
+ print "Diff written to %s" % html_diff_file_path
- if not os.path.exists(TMP_DIR):
- os.mkdir(TMP_DIR)
+ benchmark_output_file_name = benchmark_file_name
- htmlDiffFilePath = os.path.join(TMP_DIR, os.path.split(dir)[-1]) + ".html"
- htmlDiffFile = codecs.open(htmlDiffFilePath, "w", encoding=encoding)
- htmlDiffFile.write(DIFF_FILE_TEMPLATE)
+ if not WRITE_BENCHMARK:
+ benchmark_output_file_name += ".tmp"
- diffsBuffer = ""
+ self.benchmark_file = open(benchmark_output_file_name, "w")
+ self.benchmark_file.write(self.benchmark_buffer)
+ self.benchmark_file.close()
-
- for test in tests :
- print "--- %s ---" % test
+####################
+
- htmlDiffFile.write("<tr><td>%s</td>" % test)
+ def run_test(self, dir, test, repeat) :
+ print "--- %s ---" % test
+ self.html_diff_file.write("<tr><td>%s</td>" % test)
input_file = os.path.join(dir, test + ".txt")
output_file = os.path.join(dir, test + ".html")
- expected_output = codecs.open(output_file, encoding=encoding).read()
-
- input = codecs.open(input_file, encoding=encoding).read()
-
+ expected_output = codecs.open(output_file, encoding=self.encoding).read()
+ input = codecs.open(input_file, encoding=self.encoding).read()
actual_output = ""
actual_lines = []
- md.source = ""
+ self.md.source = ""
gc.collect()
mem = memory()
- t = time.clock()
+ start = time.clock()
for x in repeat:
- actual_output = md.convert(input)
- conversion_time = time.clock() - t
+ actual_output = self.md.convert(input)
+ conversion_time = time.clock() - start
conversion_mem = memory(mem)
- md.reset()
+ self.md.reset()
expected_lines = [x.encode("utf8") for x in smart_split(expected_output)]
actual_lines = [x.encode("utf8") for x in smart_split(actual_output)]
-
#diff = difflib.ndiff(expected_output.split("\n"),
# actual_output.split("\n"))
-
-
-
-
- diff = [x for x in d.compare(expected_lines,
+ diff = [x for x in differ.compare(expected_lines,
actual_lines)
if not x.startswith(" ")]
if not diff:
-
- htmlDiffFile.write("<td class='ok'>OK</td>")
-
+ self.html_diff_file.write("<td class='ok'>OK</td>")
else :
-
- failedTests.append(test)
- htmlDiffFile.write("<td class='failed'>" +
+ self.failedTests.append(test)
+ self.html_diff_file.write("<td class='failed'>" +
"<a href='#diff-%s'>FAILED</a></td>" % test)
-
print "MISMATCH on %s/%s.txt" % (dir, test)
print
for line in diff :
print line
-
- if hd!=None :
- htmlDiff = hd.make_table(expected_lines, actual_lines,
+ if htmldiff!=None :
+ htmlDiff = htmldiff.make_table(expected_lines, actual_lines,
context=True)
-
htmlDiff = "\n".join( [x for x in htmlDiff.splitlines()
if x.strip().startswith("<tr>")] )
+ self.diffs_buffer += "<a name='diff-%s'/><h2>%s</h2>" % (test, test)
+ self.diffs_buffer += DIFF_TABLE_TEMPLATE % htmlDiff
- diffsBuffer += "<a name='diff-%s'/><h2>%s</h2>" % (test, test)
- diffsBuffer += DIFF_TABLE_TEMPLATE % htmlDiff
+ expected_time, expected_mem = self.saved_benchmarks.get(test, ("na", "na"))
- expected_time, expected_mem = saved_benchmarks.get(test, ("na", "na"))
+ self.html_diff_file.write(get_benchmark_html(conversion_time, expected_time))
+ self.html_diff_file.write(get_benchmark_html(conversion_mem, expected_mem))
+ self.html_diff_file.write("</tr>\n")
- htmlDiffFile.write(get_benchmark_html(conversion_time, expected_time))
- htmlDiffFile.write(get_benchmark_html(conversion_mem, expected_mem))
- htmlDiffFile.write("</tr>\n")
-
-
- benchmark_buffer += "%s:%f:%f\n" % (test,
+ self.benchmark_buffer += "%s:%f:%f\n" % (test,
conversion_time, conversion_mem)
- htmlDiffFile.write("</table>")
-
- if sys.version < "3.0":
- htmlDiffFile.write(diffsBuffer.decode("utf8"))
- htmlDiffFile.write(FOOTER)
- htmlDiffFile.close()
- print "Diff written to %s" % htmlDiffFilePath
-
- benchmark_output_file_name = benchmark_file_name
-
- if not WRITE_BENCHMARK:
- benchmark_output_file_name += ".tmp"
-
- benchmark_file = open(benchmark_output_file_name, "w")
- benchmark_file.write(benchmark_buffer)
- benchmark_file.close()
def get_benchmark_html (actual, expected) :
-
buffer = ""
-
if not expected == "na":
-
if actual > expected * 1.5:
tdiff = "failed"
elif actual * 1.5 < expected :
tdiff = "ok"
else :
tdiff = "same"
-
if ( (actual <= 0 and expected < 0.015) or
(expected <= 0 and actual < 0.015)) :
tdiff = "same"
-
else :
tdiff = "same"
-
buffer += "<td class='%s'>%.2f</td>" % (tdiff, actual)
-
if not expected == "na":
buffer += "<td class='gray'>%.2f</td>" % (expected)
-
return buffer
-MARKDOWN_FILE = "markdown.py"
-
-
-if MARKDOWN_FILE.endswith(".py") :
- MARKDOWN_FILE = MARKDOWN_FILE[:-3]
-
-print MARKDOWN_FILE
+def run_tests() :
+
+ tester = TestRunner()
+ #test.test_directory("tests/basic")
+ tester.test_directory("tests/markdown-test", measure_time=True)
+ tester.test_directory("tests/misc", measure_time=True)
+ tester.test_directory("tests/extensions-x-tables")
+ tester.test_directory("tests/extensions-x-footnotes")
+ #tester.test_directory("tests/extensions-x-ext1-ext2")
+ tester.test_directory("tests/safe_mode", measure_time=True, safe_mode="escape")
+ tester.test_directory("tests/extensions-x-codehilite")
+ tester.test_directory("tests/extensions-x-wikilinks")
+ tester.test_directory("tests/extensions-x-toc")
+ tester.test_directory("tests/extensions-x-def_list")
+ tester.test_directory("tests/extensions-x-abbr")
+ print "\n### Final result ###"
+ if len(tester.failedTests):
+ print "%d failed tests: %s" % (len(tester.failedTests), str(tester.failedTests))
+ else:
+ print "All tests passed, no errors!"
-markdown = __import__(MARKDOWN_FILE)
-
-failedTests = []
-
-#testDirectory("tests/basic")
-testDirectory("tests/markdown-test", measure_time=True)
+run_tests()
-testDirectory("tests/misc", measure_time=True)
-testDirectory("tests/extensions-x-tables")
-testDirectory("tests/extensions-x-footnotes")
-#testDirectory("tests/extensions-x-ext1-ext2")
-testDirectory("tests/safe_mode", measure_time=True, safe_mode="escape")
-testDirectory("tests/extensions-x-codehilite")
-testDirectory("tests/extensions-x-wikilinks")
-testDirectory("tests/extensions-x-toc")
-testDirectory("tests/extensions-x-def_list")
-testDirectory("tests/extensions-x-abbr")
-print "\n### Final result ###"
-if len(failedTests):
- print "%d failed tests: %s" % (len(failedTests), str(failedTests))
-else:
- print "All tests passed, no errors!"
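
For context on the refactored runner: test_directory() decides which extensions to enable from the directory name, treating everything after "-x-" as a dash-separated extension list. A small illustration of that convention follows; the helper name extensions_for is hypothetical, but the splitting logic is copied from the hunk above.

    # Illustration only. Mirrors the directory-naming convention used by
    # TestRunner.test_directory(): anything after "-x-" in the directory
    # name is a dash-separated list of markdown extensions to load.
    import os

    def extensions_for(dir):
        parts = os.path.split(dir)[-1].split("-x-")
        if len(parts) > 1:
            return parts[1].split("-")
        return []

    print extensions_for("tests/extensions-x-footnotes")   # ['footnotes']
    print extensions_for("tests/safe_mode")                # []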