diff --git a/forms.py b/forms.py
index 5ef8181..dfcf765 100644
--- a/forms.py
+++ b/forms.py
@@ -32,7 +32,7 @@ class PostForm(ModelForm):
     class Meta:
         model = Post
-        exclude = ('title_slug', 'creation_date', 'modification_date', 'author', 'blog', 'tags')
+        exclude = ('title_slug', 'creation_date', 'modification_date', 'author', 'blog', 'tags', 'content_format')
 
     def __init__(self, *args, **kwargs):
         super(PostForm, self).__init__(*args, **kwargs)
@@ -46,7 +46,7 @@ class CategoryForm(ModelForm):
 class UserForm(ModelForm):
     class Meta:
         model = User
-        exclude = ('is_staff', 'is_active', 'last_login', 'last_joined', 'user_permissions', 'groups', 'date_joined', 'password')
+        exclude = ('password')
 
 class CommentForm(ModelForm):
     class Meta:
diff --git a/generators/index.py b/generators/index.py
index 6dacdbd..78f7e58 100644
--- a/generators/index.py
+++ b/generators/index.py
@@ -165,6 +165,7 @@ class Index(DynastieGenerator):
         return code
 
     def createPost(self, posts, dom, post_elem, root):
+        from dynastie.models import Post
         post = self.cur_post_obj
 
         if post.id in self.hash_posts and not self.first_try:
@@ -191,6 +192,9 @@ class Index(DynastieGenerator):
                 f = open(filename, 'rb')
                 post_content = f.read()
                 f.close()
+                if post.content_format == Post.CONTENT_TEXT:
+                    from dynastie.generators import markdown2
+                    post_content = markdown2.markdown(post_content)
                 self.hash_posts_content[filename] = post_content
             else:
                 post_content = self.hash_posts_content[filename]
diff --git a/generators/markdown2.py b/generators/markdown2.py
new file mode 100644
index 0000000..2f0dee2
--- /dev/null
+++ b/generators/markdown2.py
@@ -0,0 +1,2320 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Trent Mick.
+# Copyright (c) 2007-2008 ActiveState Corp.
+# License: MIT (http://www.opensource.org/licenses/mit-license.php)
+
+from __future__ import generators
+
+r"""A fast and complete Python implementation of Markdown.
+
+[from http://daringfireball.net/projects/markdown/]
+> Markdown is a text-to-HTML filter; it translates an easy-to-read /
+> easy-to-write structured text format into HTML. Markdown's text
+> format is most similar to that of plain text email, and supports
+> features such as headers, *emphasis*, code blocks, blockquotes, and
+> links.
+>
+> Markdown's syntax is designed not as a generic markup language, but
+> specifically to serve as a front-end to (X)HTML. You can use span-level
+> HTML tags anywhere in a Markdown document, and you can use block level
+> HTML tags (like
+ wraps in <code> tags.
+ """
+ yield 0, "<code>"
+ for tup in inner:
+ yield tup
+ yield 0, "</code>"
+
+ def wrap(self, source, outfile):
+ """Return the source with a code, pre, and div."""
+ return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
+
+ formatter_opts.setdefault("cssclass", "codehilite")
+ formatter = HtmlCodeFormatter(**formatter_opts)
+ return pygments.highlight(codeblock, lexer, formatter)
+
+ def _code_block_sub(self, match, is_fenced_code_block=False):
+ lexer_name = None
+ if is_fenced_code_block:
+ lexer_name = match.group(1)
+ if lexer_name:
+ formatter_opts = self.extras['fenced-code-blocks'] or {}
+ codeblock = match.group(2)
+ codeblock = codeblock[:-1] # drop one trailing newline
+ else:
+ codeblock = match.group(1)
+ codeblock = self._outdent(codeblock)
+ codeblock = self._detab(codeblock)
+ codeblock = codeblock.lstrip('\n') # trim leading newlines
+ codeblock = codeblock.rstrip() # trim trailing whitespace
+
+ # Note: "code-color" extra is DEPRECATED.
+ if "code-color" in self.extras and codeblock.startswith(":::"):
+ lexer_name, rest = codeblock.split('\n', 1)
+ lexer_name = lexer_name[3:].strip()
+ codeblock = rest.lstrip("\n") # Remove lexer declaration line.
+ formatter_opts = self.extras['code-color'] or {}
+
+ if lexer_name:
+ lexer = self._get_pygments_lexer(lexer_name)
+ if lexer:
+ colored = self._color_with_pygments(codeblock, lexer,
+ **formatter_opts)
+ return "\n\n%s\n\n" % colored
+
+ codeblock = self._encode_code(codeblock)
+ pre_class_str = self._html_class_str_from_tag("pre")
+ code_class_str = self._html_class_str_from_tag("code")
+ return "\n\n%s\n
\n\n" % (
+ pre_class_str, code_class_str, codeblock)
+
+ def _html_class_str_from_tag(self, tag):
+ """Get the appropriate ' class="..."' string (note the leading
+ space), if any, for the given tag.
+ """
+ if "html-classes" not in self.extras:
+ return ""
+ try:
+ html_classes_from_tag = self.extras["html-classes"]
+ except TypeError:
+ return ""
+ else:
+ if tag in html_classes_from_tag:
+ return ' class="%s"' % html_classes_from_tag[tag]
+ return ""
+
+ def _do_code_blocks(self, text):
+ """Process Markdown `` blocks."""
+ code_block_re = re.compile(r'''
+ (?:\n\n|\A\n?)
+ ( # $1 = the code block -- one or more lines, starting with a space/tab
+ (?:
+ (?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces
+ .*\n+
+ )+
+ )
+ ((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc
+ ''' % (self.tab_width, self.tab_width),
+ re.M | re.X)
+ return code_block_re.sub(self._code_block_sub, text)
+
+ _fenced_code_block_re = re.compile(r'''
+ (?:\n\n|\A\n?)
+ ^```([\w+-]+)?[ \t]*\n # opening fence, $1 = optional lang
+ (.*?) # $2 = code block content
+ ^```[ \t]*\n # closing fence
+ ''', re.M | re.X | re.S)
+
+ def _fenced_code_block_sub(self, match):
+ return self._code_block_sub(match, is_fenced_code_block=True);
+
+ def _do_fenced_code_blocks(self, text):
+ """Process ```-fenced unindented code blocks ('fenced-code-blocks' extra)."""
+ return self._fenced_code_block_re.sub(self._fenced_code_block_sub, text)
+
+ # Rules for a code span:
+ # - backslash escapes are not interpreted in a code span
+ # - to include one or or a run of more backticks the delimiters must
+ # be a longer run of backticks
+ # - cannot start or end a code span with a backtick; pad with a
+ # space and that space will be removed in the emitted HTML
+ # See `test/tm-cases/escapes.text` for a number of edge-case
+ # examples.
+ _code_span_re = re.compile(r'''
+ (?<!\\)
+ (`+) # \1 = Opening run of `
+ (?!`) # See Note A test/tm-cases/escapes.text
+ (.+?) # \2 = The code block
+ (?<!`)
+ \1 # Matching closer
+ (?!`)
+ ''', re.X | re.S)
+
+ def _code_span_sub(self, match):
+ c = match.group(2).strip(" \t")
+ c = self._encode_code(c)
+ return "<code>%s</code>" % c
+
+ def _do_code_spans(self, text):
+ # * Backtick quotes are used for <code></code> spans.
+ #
+ # * You can use multiple backticks as the delimiters if you want to
+ # include literal backticks in the code span. So, this input:
+ #
+ # Just type ``foo `bar` baz`` at the prompt.
+ #
+ # Will translate to:
+ #
+ # Just type <code>foo `bar` baz</code> at the prompt.
+ #
+ # There's no arbitrary limit to the number of backticks you
+ # can use as delimiters. If you need three consecutive backticks
+ # in your code, use four for delimiters, etc.
+ #
+ # * You can use spaces to get literal backticks at the edges:
+ #
+ # ... type `` `bar` `` ...
+ #
+ # Turns to:
+ #
+ # ... type <code>`bar`</code> ...
+ return self._code_span_re.sub(self._code_span_sub, text)
+
+ def _encode_code(self, text):
+ """Encode/escape certain characters inside Markdown code runs.
+ The point is that in code, these characters are literals,
+ and lose their special Markdown meanings.
+ """
+ replacements = [
+ # Encode all ampersands; HTML entities are not
+ # entities within a Markdown code span.
+ ('&', '&'),
+ # Do the angle bracket song and dance:
+ ('<', '<'),
+ ('>', '>'),
+ ]
+ for before, after in replacements:
+ text = text.replace(before, after)
+ hashed = _hash_text(text)
+ self._escape_table[text] = hashed
+ return hashed
+
+ _strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S)
+ _em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S)
+ _code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
+ _code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S)
+ def _do_italics_and_bold(self, text):
+ # <strong> must go first:
+ if "code-friendly" in self.extras:
+ text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text)
+ text = self._code_friendly_em_re.sub(r"<em>\1</em>", text)
+ else:
+ text = self._strong_re.sub(r"<strong>\2</strong>", text)
+ text = self._em_re.sub(r"<em>\2</em>", text)
+ return text
+
+ # "smarty-pants" extra: Very liberal in interpreting a single prime as an
+ # apostrophe; e.g. ignores the fact that "round", "bout", "twer", and
+ # "twixt" can be written without an initial apostrophe. This is fine because
+ # using scare quotes (single quotation marks) is rare.
+ _apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))")
+ _contractions = ["tis", "twas", "twer", "neath", "o", "n",
+ "round", "bout", "twixt", "nuff", "fraid", "sup"]
+ def _do_smart_contractions(self, text):
+ text = self._apostrophe_year_re.sub(r"&#8217;\1", text)
+ for c in self._contractions:
+ text = text.replace("'%s" % c, "&#8217;%s" % c)
+ text = text.replace("'%s" % c.capitalize(),
+ "&#8217;%s" % c.capitalize())
+ return text
+
+ # Substitute double-quotes before single-quotes.
+ _opening_single_quote_re = re.compile(r"(?<!\S)'(?=\S)")
+ _opening_double_quote_re = re.compile(r'(?<!\S)"(?=\S)')
+ _closing_single_quote_re = re.compile(r"(?<=\S)'")
+ _closing_double_quote_re = re.compile(r'(?<=\S)"(?=(\s|,|;|\.|\?|!|$))')
+
+ def _do_smart_punctuation(self, text):
+ """Fancifies 'single quotes', "double quotes", and apostrophes.
+ Converts --, ---, and ... into en dashes, em dashes, and ellipses.
+
+ Inspiration is: <http://daringfireball.net/projects/smartypants/>
+ See "test/tm-cases/smarty_pants.text" for a full discussion of the
+ support here and
+ <http://code.google.com/p/python-markdown2/issues/detail?id=42> for a
+ discussion of some diversion from the original SmartyPants.
+ """
+ if "'" in text: # guard for perf
+ text = self._do_smart_contractions(text)
+ text = self._opening_single_quote_re.sub("&#8216;", text)
+ text = self._closing_single_quote_re.sub("&#8217;", text)
+
+ if '"' in text: # guard for perf
+ text = self._opening_double_quote_re.sub("&#8220;", text)
+ text = self._closing_double_quote_re.sub("&#8221;", text)
+
+ text = text.replace("---", "&#8212;")
+ text = text.replace("--", "&#8211;")
+ text = text.replace("...", "&#8230;")
+ text = text.replace(" . . . ", "&#8230;")
+ text = text.replace(". . .", "&#8230;")
+ return text
+
+ _block_quote_re = re.compile(r'''
+ ( # Wrap whole match in \1
+ (
+ ^[ \t]*>[ \t]? # '>' at the start of a line
+ .+\n # rest of the first line
+ (.+\n)* # subsequent consecutive lines
+ \n* # blanks
+ )+
+ )
+ ''', re.M | re.X)
+ _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M);
+
+ _html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S)
+ def _dedent_two_spaces_sub(self, match):
+ return re.sub(r'(?m)^ ', '', match.group(1))
+
+ def _block_quote_sub(self, match):
+ bq = match.group(1)
+ bq = self._bq_one_level_re.sub('', bq) # trim one level of quoting
+ bq = self._ws_only_line_re.sub('', bq) # trim whitespace-only lines
+ bq = self._run_block_gamut(bq) # recurse
+
+ bq = re.sub('(?m)^', ' ', bq)
+ # These leading spaces screw with <pre> content, so we need to fix that:
+ bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq)
+
+ return "\n%s\n
\n\n" % bq
+
+ def _do_block_quotes(self, text):
+ if '>' not in text:
+ return text
+ return self._block_quote_re.sub(self._block_quote_sub, text)
+
+ def _form_paragraphs(self, text):
+ # Strip leading and trailing lines:
+ text = text.strip('\n')
+
+ # Wrap <p> tags.
+ grafs = []
+ for i, graf in enumerate(re.split(r"\n{2,}", text)):
+ if graf in self.html_blocks:
+ # Unhashify HTML blocks
+ grafs.append(self.html_blocks[graf])
+ else:
+ cuddled_list = None
+ if "cuddled-lists" in self.extras:
+ # Need to put back trailing '\n' for `_list_item_re`
+ # match at the end of the paragraph.
+ li = self._list_item_re.search(graf + '\n')
+ # Two of the same list marker in this paragraph: a likely
+ # candidate for a list cuddled to preceding paragraph
+ # text (issue 33). Note the `[-1]` is a quick way to
+ # consider numeric bullets (e.g. "1." and "2.") to be
+ # equal.
+ if (li and len(li.group(2)) <= 3 and li.group("next_marker")
+ and li.group("marker")[-1] == li.group("next_marker")[-1]):
+ start = li.start()
+ cuddled_list = self._do_lists(graf[start:]).rstrip("\n")
+ assert cuddled_list.startswith("<ul>") or cuddled_list.startswith("<ol>")
+ graf = graf[:start]
+
+ # Wrap <p> tags.
+ graf = self._run_span_gamut(graf)
+ grafs.append("<p>" + graf.lstrip(" \t") + "</p>")
+
+ if cuddled_list:
+ grafs.append(cuddled_list)
+
+ return "\n\n".join(grafs)
+
+ def _add_footnotes(self, text):
+ if self.footnotes:
+ footer = [
+ '<div class="footnotes">',
+ '<hr' + self.empty_element_suffix,
+ '<ol>',
+ ]
+ for i, id in enumerate(self.footnote_ids):
+ if i != 0:
+ footer.append('')
+ footer.append('<li id="fn-%s">' % id)
+ footer.append(self._run_block_gamut(self.footnotes[id]))
+ backlink = ('<a href="#fnref-%s" '
+ 'class="footnoteBackLink" '
+ 'title="Jump back to footnote %d in the text.">'
+ '&#8617;</a>' % (id, i+1))
+ if footer[-1].endswith("</p>"):
+ footer[-1] = footer[-1][:-len("</p>")] \
+ + '&#160;' + backlink + "</p>"
+ else:
+ footer.append("\n<p>%s</p>" % backlink)
+ footer.append('</li>')
+ footer.append('</ol>')
+ footer.append('</div>')
+ return text + '\n\n' + '\n'.join(footer)
+ else:
+ return text
+
+ # Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin:
+ # http://bumppo.net/projects/amputator/
+ _ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
+ _naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
+ _naked_gt_re = re.compile(r'''(?<![a-z0-9?!/'"-])>''', re.I)
+
+ def _encode_amps_and_angles(self, text):
+ # Smart processing for ampersands and angle brackets that need
+ # to be encoded.
+ text = self._ampersand_re.sub('&amp;', text)
+
+ # Encode naked <'s
+ text = self._naked_lt_re.sub('&lt;', text)
+
+ # Encode naked >'s
+ # Note: Other markdown implementations (e.g. Markdown.pl, PHP
+ # Markdown) don't do this.
+ text = self._naked_gt_re.sub('&gt;', text)
+ return text
+
+ def _encode_backslash_escapes(self, text):
+ for ch, escape in list(self._escape_table.items()):
+ text = text.replace("\\"+ch, escape)
+ return text
+
+ _auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I)
+ def _auto_link_sub(self, match):
+ g1 = match.group(1)
+ return '<a href="%s">%s</a>' % (g1, g1)
+
+ _auto_email_link_re = re.compile(r"""
+ <
+ (?:mailto:)?
+ (
+ [-.\w]+
+ \@
+ [-\w]+(\.[-\w]+)*\.[a-z]+
+ )
+ >
+ """, re.I | re.X | re.U)
+ def _auto_email_link_sub(self, match):
+ return self._encode_email_address(
+ self._unescape_special_chars(match.group(1)))
+
+ def _do_auto_links(self, text):
+ text = self._auto_link_re.sub(self._auto_link_sub, text)
+ text = self._auto_email_link_re.sub(self._auto_email_link_sub, text)
+ return text
+
+ def _encode_email_address(self, addr):
+ # Input: an email address, e.g. "foo@example.com"
+ #
+ # Output: the email address as a mailto link, with each character
+ # of the address encoded as either a decimal or hex entity, in
+ # the hopes of foiling most address harvesting spam bots. E.g.:
+ #
+ # foo
+ # @example.com
+ #
+ # Based on a filter by Matthew Wickline, posted to the BBEdit-Talk
+ # mailing list:
+ chars = [_xml_encode_email_char_at_random(ch)
+ for ch in "mailto:" + addr]
+ # Strip the mailto: from the visible part.
+ addr = '<a href="%s">%s</a>' \
+ % (''.join(chars), ''.join(chars[7:]))
+ return addr
+
+ def _do_link_patterns(self, text):
+ """Caveat emptor: there isn't much guarding against link
+ patterns being formed inside other standard Markdown links, e.g.
+ inside a [link def][like this].
+
+ Dev Notes: *Could* consider prefixing regexes with a negative
+ lookbehind assertion to attempt to guard against this.
+ """
+ link_from_hash = {}
+ for regex, repl in self.link_patterns:
+ replacements = []
+ for match in regex.finditer(text):
+ if hasattr(repl, "__call__"):
+ href = repl(match)
+ else:
+ href = match.expand(repl)
+ replacements.append((match.span(), href))
+ for (start, end), href in reversed(replacements):
+ escaped_href = (
+ href.replace('"', '"') # b/c of attr quote
+ # To avoid markdown and :
+ .replace('*', self._escape_table['*'])
+ .replace('_', self._escape_table['_']))
+ link = '%s' % (escaped_href, text[start:end])
+ hash = _hash_text(link)
+ link_from_hash[hash] = link
+ text = text[:start] + hash + text[end:]
+ for hash, link in list(link_from_hash.items()):
+ text = text.replace(hash, link)
+ return text
+
+ def _unescape_special_chars(self, text):
+ # Swap back in all the special characters we've hidden.
+ for ch, hash in list(self._escape_table.items()):
+ text = text.replace(hash, ch)
+ return text
+
+ def _outdent(self, text):
+ # Remove one level of line-leading tabs or spaces
+ return self._outdent_re.sub('', text)
+
+
+class MarkdownWithExtras(Markdown):
+ """A markdowner class that enables most extras:
+
+ - footnotes
+ - code-color (only has effect if 'pygments' Python module on path)
+
+ These are not included:
+ - pyshell (specific to Python-related documenting)
+ - code-friendly (because it *disables* part of the syntax)
+ - link-patterns (because you need to specify some actual
+ link-patterns anyway)
+ """
+ extras = ["footnotes", "code-color"]
+
+
+#---- internal support functions
+
+class UnicodeWithAttrs(unicode):
+ """A subclass of unicode used for the return value of conversion to
+ possibly attach some attributes. E.g. the "toc_html" attribute when
+ the "toc" extra is used.
+ """
+ metadata = None
+ _toc = None
+ def toc_html(self):
+ """Return the HTML for the current TOC.
+
+ This expects the `_toc` attribute to have been set on this instance.
+ """
+ if self._toc is None:
+ return None
+
+ def indent():
+ return ' ' * (len(h_stack) - 1)
+ lines = []
+ h_stack = [0] # stack of header-level numbers
+ for level, id, name in self._toc:
+ if level > h_stack[-1]:
+ lines.append("%s" % indent())
+ h_stack.append(level)
+ elif level == h_stack[-1]:
+ lines[-1] += ""
+ else:
+ while level < h_stack[-1]:
+ h_stack.pop()
+ if not lines[-1].endswith(""):
+ lines[-1] += ""
+ lines.append("%s
" % indent())
+ lines.append('%s- %s' % (
+ indent(), id, name))
+ while len(h_stack) > 1:
+ h_stack.pop()
+ if not lines[-1].endswith("
"):
+ lines[-1] += ""
+ lines.append("%s
" % indent())
+ return '\n'.join(lines) + '\n'
+ toc_html = property(toc_html)
+
+## {{{ http://code.activestate.com/recipes/577257/ (r1)
+_slugify_strip_re = re.compile(r'[^\w\s-]')
+_slugify_hyphenate_re = re.compile(r'[-\s]+')
+def _slugify(value):
+ """
+ Normalizes string, converts to lowercase, removes non-alpha characters,
+ and converts spaces to hyphens.
+
+ From Django's "django/template/defaultfilters.py".
+ """
+ import unicodedata
+ value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode()
+ value = _slugify_strip_re.sub('', value).strip().lower()
+ return _slugify_hyphenate_re.sub('-', value)
+## end of http://code.activestate.com/recipes/577257/ }}}
+
+
+# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
+def _curry(*args, **kwargs):
+ function, args = args[0], args[1:]
+ def result(*rest, **kwrest):
+ combined = kwargs.copy()
+ combined.update(kwrest)
+ return function(*args + rest, **combined)
+ return result
+
+# Recipe: regex_from_encoded_pattern (1.0)
+def _regex_from_encoded_pattern(s):
+ """'foo' -> re.compile(re.escape('foo'))
+ '/foo/' -> re.compile('foo')
+ '/foo/i' -> re.compile('foo', re.I)
+ """
+ if s.startswith('/') and s.rfind('/') != 0:
+ # Parse it: /PATTERN/FLAGS
+ idx = s.rfind('/')
+ pattern, flags_str = s[1:idx], s[idx+1:]
+ flag_from_char = {
+ "i": re.IGNORECASE,
+ "l": re.LOCALE,
+ "s": re.DOTALL,
+ "m": re.MULTILINE,
+ "u": re.UNICODE,
+ }
+ flags = 0
+ for char in flags_str:
+ try:
+ flags |= flag_from_char[char]
+ except KeyError:
+ raise ValueError("unsupported regex flag: '%s' in '%s' "
+ "(must be one of '%s')"
+ % (char, s, ''.join(list(flag_from_char.keys()))))
+ return re.compile(s[1:idx], flags)
+ else: # not an encoded regex
+ return re.compile(re.escape(s))
+
+# Recipe: dedent (0.1.2)
+def _dedentlines(lines, tabsize=8, skip_first_line=False):
+ """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
+
+ "lines" is a list of lines to dedent.
+ "tabsize" is the tab width to use for indent width calculations.
+ "skip_first_line" is a boolean indicating if the first line should
+ be skipped for calculating the indent width and for dedenting.
+ This is sometimes useful for docstrings and similar.
+
+ Same as dedent() except operates on a sequence of lines. Note: the
+ lines list is modified **in-place**.
+ """
+ DEBUG = False
+ if DEBUG:
+ print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
+ % (tabsize, skip_first_line))
+ indents = []
+ margin = None
+ for i, line in enumerate(lines):
+ if i == 0 and skip_first_line: continue
+ indent = 0
+ for ch in line:
+ if ch == ' ':
+ indent += 1
+ elif ch == '\t':
+ indent += tabsize - (indent % tabsize)
+ elif ch in '\r\n':
+ continue # skip all-whitespace lines
+ else:
+ break
+ else:
+ continue # skip all-whitespace lines
+ if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
+ if margin is None:
+ margin = indent
+ else:
+ margin = min(margin, indent)
+ if DEBUG: print("dedent: margin=%r" % margin)
+
+ if margin is not None and margin > 0:
+ for i, line in enumerate(lines):
+ if i == 0 and skip_first_line: continue
+ removed = 0
+ for j, ch in enumerate(line):
+ if ch == ' ':
+ removed += 1
+ elif ch == '\t':
+ removed += tabsize - (removed % tabsize)
+ elif ch in '\r\n':
+ if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line)
+ lines[i] = lines[i][j:]
+ break
+ else:
+ raise ValueError("unexpected non-whitespace char %r in "
+ "line %r while removing %d-space margin"
+ % (ch, line, margin))
+ if DEBUG:
+ print("dedent: %r: %r -> removed %d/%d"\
+ % (line, ch, removed, margin))
+ if removed == margin:
+ lines[i] = lines[i][j+1:]
+ break
+ elif removed > margin:
+ lines[i] = ' '*(removed-margin) + lines[i][j+1:]
+ break
+ else:
+ if removed:
+ lines[i] = lines[i][removed:]
+ return lines
+
+def _dedent(text, tabsize=8, skip_first_line=False):
+ """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
+
+ "text" is the text to dedent.
+ "tabsize" is the tab width to use for indent width calculations.
+ "skip_first_line" is a boolean indicating if the first line should
+ be skipped for calculating the indent width and for dedenting.
+ This is sometimes useful for docstrings and similar.
+
+ textwrap.dedent(s), but don't expand tabs to spaces
+ """
+ lines = text.splitlines(1)
+ _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
+ return ''.join(lines)
+
+
+class _memoized(object):
+ """Decorator that caches a function's return value each time it is called.
+ If called later with the same arguments, the cached value is returned, and
+ not re-evaluated.
+
+ http://wiki.python.org/moin/PythonDecoratorLibrary
+ """
+ def __init__(self, func):
+ self.func = func
+ self.cache = {}
+ def __call__(self, *args):
+ try:
+ return self.cache[args]
+ except KeyError:
+ self.cache[args] = value = self.func(*args)
+ return value
+ except TypeError:
+ # uncachable -- for instance, passing a list as an argument.
+ # Better to not cache than to blow up entirely.
+ return self.func(*args)
+ def __repr__(self):
+ """Return the function's docstring."""
+ return self.func.__doc__
+
+
+def _xml_oneliner_re_from_tab_width(tab_width):
+ """Standalone XML processing instruction regex."""
+ return re.compile(r"""
+ (?:
+ (?<=\n\n) # Starting after a blank line
+ | # or
+ \A\n? # the beginning of the doc
+ )
+ ( # save in $1
+ [ ]{0,%d}
+ (?:
+ <\?\w+\b\s+.*?\?> # XML processing instruction
+ |
+ <\w+:\w+\b\s+.*?/> # namespaced single tag
+ )
+ [ \t]*
+ (?=\n{2,}|\Z) # followed by a blank line or end of document
+ )
+ """ % (tab_width - 1), re.X)
+_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)
+
+def _hr_tag_re_from_tab_width(tab_width):
+ return re.compile(r"""
+ (?:
+ (?<=\n\n) # Starting after a blank line
+ | # or
+ \A\n? # the beginning of the doc
+ )
+ ( # save in \1
+ [ ]{0,%d}
+ <(hr) # start tag = \2
+ \b # word break
+ ([^<>])*? #
+ /?> # the matching end tag
+ [ \t]*
+ (?=\n{2,}|\Z) # followed by a blank line or end of document
+ )
+ """ % (tab_width - 1), re.X)
+_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
+
+
+def _xml_escape_attr(attr, skip_single_quote=True):
+ """Escape the given string for use in an HTML/XML tag attribute.
+
+ By default this doesn't bother with escaping `'` to `&#39;`, presuming that
+ the tag attribute is surrounded by double quotes.
+ """
+ escaped = (attr
+ .replace('&', '&amp;')
+ .replace('"', '&quot;')
+ .replace('<', '&lt;')
+ .replace('>', '&gt;'))
+ if not skip_single_quote:
+ escaped = escaped.replace("'", "&#39;")
+ return escaped
+
+
+def _xml_encode_email_char_at_random(ch):
+ r = random()
+ # Roughly 10% raw, 45% hex, 45% dec.
+ # '@' *must* be encoded. I [John Gruber] insist.
+ # Issue 26: '_' must be encoded.
+ if r > 0.9 and ch not in "@_":
+ return ch
+ elif r < 0.45:
+ # The [1:] is to drop leading '0': 0x63 -> x63
+ return '&#%s;' % hex(ord(ch))[1:]
+ else:
+ return '&#%s;' % ord(ch)
+
+
+
+#---- mainline
+
+class _NoReflowFormatter(optparse.IndentedHelpFormatter):
+ """An optparse formatter that does NOT reflow the description."""
+ def format_description(self, description):
+ return description or ""
+
+def _test():
+ import doctest
+ doctest.testmod()
+
+def main(argv=None):
+ if argv is None:
+ argv = sys.argv
+ if not logging.root.handlers:
+ logging.basicConfig()
+
+ usage = "usage: %prog [PATHS...]"
+ version = "%prog "+__version__
+ parser = optparse.OptionParser(prog="markdown2", usage=usage,
+ version=version, description=cmdln_desc,
+ formatter=_NoReflowFormatter())
+ parser.add_option("-v", "--verbose", dest="log_level",
+ action="store_const", const=logging.DEBUG,
+ help="more verbose output")
+ parser.add_option("--encoding",
+ help="specify encoding of text content")
+ parser.add_option("--html4tags", action="store_true", default=False,
+ help="use HTML 4 style for empty element tags")
+ parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
+ help="sanitize literal HTML: 'escape' escapes "
+ "HTML meta chars, 'replace' replaces with an "
+ "[HTML_REMOVED] note")
+ parser.add_option("-x", "--extras", action="append",
+ help="Turn on specific extra features (not part of "
+ "the core Markdown spec). See above.")
+ parser.add_option("--use-file-vars",
+ help="Look for and use Emacs-style 'markdown-extras' "
+ "file var to turn on extras. See "
+ "")
+ parser.add_option("--link-patterns-file",
+ help="path to a link pattern file")
+ parser.add_option("--self-test", action="store_true",
+ help="run internal self-tests (some doctests)")
+ parser.add_option("--compare", action="store_true",
+ help="run against Markdown.pl as well (for testing)")
+ parser.set_defaults(log_level=logging.INFO, compare=False,
+ encoding="utf-8", safe_mode=None, use_file_vars=False)
+ opts, paths = parser.parse_args()
+ log.setLevel(opts.log_level)
+
+ if opts.self_test:
+ return _test()
+
+ if opts.extras:
+ extras = {}
+ for s in opts.extras:
+ splitter = re.compile("[,;: ]+")
+ for e in splitter.split(s):
+ if '=' in e:
+ ename, earg = e.split('=', 1)
+ try:
+ earg = int(earg)
+ except ValueError:
+ pass
+ else:
+ ename, earg = e, None
+ extras[ename] = earg
+ else:
+ extras = None
+
+ if opts.link_patterns_file:
+ link_patterns = []
+ f = open(opts.link_patterns_file)
+ try:
+ for i, line in enumerate(f.readlines()):
+ if not line.strip(): continue
+ if line.lstrip().startswith("#"): continue
+ try:
+ pat, href = line.rstrip().rsplit(None, 1)
+ except ValueError:
+ raise MarkdownError("%s:%d: invalid link pattern line: %r"
+ % (opts.link_patterns_file, i+1, line))
+ link_patterns.append(
+ (_regex_from_encoded_pattern(pat), href))
+ finally:
+ f.close()
+ else:
+ link_patterns = None
+
+ from os.path import join, dirname, abspath, exists
+ markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
+ "Markdown.pl")
+ if not paths:
+ paths = ['-']
+ for path in paths:
+ if path == '-':
+ text = sys.stdin.read()
+ else:
+ fp = codecs.open(path, 'r', opts.encoding)
+ text = fp.read()
+ fp.close()
+ if opts.compare:
+ from subprocess import Popen, PIPE
+ print("==== Markdown.pl ====")
+ p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
+ p.stdin.write(text.encode('utf-8'))
+ p.stdin.close()
+ perl_html = p.stdout.read().decode('utf-8')
+ if py3:
+ sys.stdout.write(perl_html)
+ else:
+ sys.stdout.write(perl_html.encode(
+ sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
+ print("==== markdown2.py ====")
+ html = markdown(text,
+ html4tags=opts.html4tags,
+ safe_mode=opts.safe_mode,
+ extras=extras, link_patterns=link_patterns,
+ use_file_vars=opts.use_file_vars)
+ if py3:
+ sys.stdout.write(html)
+ else:
+ sys.stdout.write(html.encode(
+ sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
+ if extras and "toc" in extras:
+ log.debug("toc_html: " +
+ html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
+ if opts.compare:
+ test_dir = join(dirname(dirname(abspath(__file__))), "test")
+ if exists(join(test_dir, "test_markdown2.py")):
+ sys.path.insert(0, test_dir)
+ from test_markdown2 import norm_html_from_html
+ norm_html = norm_html_from_html(html)
+ norm_perl_html = norm_html_from_html(perl_html)
+ else:
+ norm_html = html
+ norm_perl_html = perl_html
+ print("==== match? %r ====" % (norm_perl_html == norm_html))
+
+
+if __name__ == "__main__":
+ sys.exit( main(sys.argv) )
diff --git a/models.py b/models.py
index 99ac6a9..464045c 100644
--- a/models.py
+++ b/models.py
@@ -235,6 +235,12 @@ class Post(models.Model):
keywords = models.TextField(blank=True)
tags = models.ManyToManyField(Tag, blank=True, null=True)
blog = models.ForeignKey(Blog)
+ CONTENT_HTML = 0
+ CONTENT_TEXT = 1
+ CONTENT_FORMAT = (
+ (CONTENT_HTML, 'HTML'),
+ (CONTENT_TEXT, 'Text'))
+ content_format = models.IntegerField(choices=CONTENT_FORMAT, default=CONTENT_HTML, blank=False, null=False)
def getPath(self):
filename = '/post/'
@@ -355,6 +361,13 @@ class Post(models.Model):
if os.path.exists(filename) and len(os.listdir(filename)) == 0:
os.rmdir(filename)
+
+ def get_editor(self):
+ if self.content_format == Post.CONTENT_HTML:
+ return 'html'
+ else:
+ return 'text'
+
class Comment(models.Model):
post = models.ForeignKey(Post)
parent = models.ForeignKey('self', null=True, blank=True)
diff --git a/sites/blog.soutade.fr/css/blog.css b/sites/blog.soutade.fr/css/blog.css
index 83212a1..e765619 100755
--- a/sites/blog.soutade.fr/css/blog.css
+++ b/sites/blog.soutade.fr/css/blog.css
@@ -549,3 +549,15 @@ div.all_posts div.post
margin-bottom:2px;
margin-left:20px;
}
+
+.post_content > p > img
+{
+ display:block;
+ margin-left: auto;
+ margin-right: auto;
+}
+
+.post_content > p
+{
+ text-align:justify;
+}
diff --git a/static/css/dynastie.css b/static/css/dynastie.css
index f655369..60245ba 100644
--- a/static/css/dynastie.css
+++ b/static/css/dynastie.css
@@ -2,4 +2,9 @@
{
color:green;
font-weight:bold;
+}
+
+.markdown_help
+{
+ padding-right:20px;
}
\ No newline at end of file
diff --git a/static/js/dynastie.js b/static/js/dynastie.js
index 73d7064..e1d6017 100644
--- a/static/js/dynastie.js
+++ b/static/js/dynastie.js
@@ -67,4 +67,22 @@ function addTag()
else
text_tags.value = cur_elem;
}
+}
+
+function switchEditor()
+{
+ options = document.getElementById("editor");
+ help = document.getElementById("markdown_help");
+
+ if (options.selectedIndex == 0) // HTML
+ {
+ tinyMCE.execCommand('mceAddControl', false, 'content');
+ help.style.display="none";
+ // tinymce.execCommand('mceToggleEditor',true,'content');
+ } else // Text
+ {
+ tinyMCE.execCommand('mceRemoveControl', false, 'content');
+ help.style.display="block";
+ // tinymce.execCommand('mceToggleEditor',false,'content');
+ }
}
\ No newline at end of file
diff --git a/templates/add_post.html b/templates/add_post.html
index 1843289..0e0edab 100644
--- a/templates/add_post.html
+++ b/templates/add_post.html
@@ -17,7 +17,69 @@ Available tags:
{% endfor %}
-
+
Editor
+{% if editor == "html" %}
+
+{% else %}
+
+{% endif %}
+
+{% if editor == "html" %}
+
+{% if editor == "html" %}
+