Port Pelican to Python 3.

Stays compatible with the 2.x series, thanks to a unified codebase.
Dirk Makowski 2013-01-11 02:57:43 +01:00 committed by Alexis Métaireau
commit 71995d5e1b
43 changed files with 495 additions and 287 deletions
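The port keeps a single codebase for Python 2 and 3 rather than a separate 3.x branch: modules gain from __future__ import unicode_literals, print_function; basestring checks become six.string_types; the Python 2-only "except X, e" syntax becomes "except X as e"; dict.iteritems() becomes items(); and octal literals such as 0755 are rewritten. A minimal sketch of these idioms as they recur in the diff below; the load_class helper is illustrative, not part of the commit:

# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os
import six

def load_class(path_or_cls):
    # Illustrative helper mirroring pelican/__init__.py: strings name a dotted
    # path, anything else is assumed to already be a class. basestring does
    # not exist on Python 3, so six.string_types covers both interpreters.
    if isinstance(path_or_cls, six.string_types):
        module, cls_name = path_or_cls.rsplit('.', 1)
        path_or_cls = getattr(__import__(module), cls_name)
    return path_or_cls

try:
    os.remove('no-such-file.txt')
except OSError as e:  # "except OSError, e:" would be a syntax error on Python 3
    print('Error: {0}'.format(e))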

2
.gitignore vendored
View file

@ -11,3 +11,5 @@ tags
.tox .tox
.coverage .coverage
htmlcov htmlcov
six-*.egg/
*.orig

View file

@ -2,11 +2,13 @@ language: python
python: python:
- "2.6" - "2.6"
- "2.7" - "2.7"
# - "3.2"
before_install: before_install:
- sudo apt-get update -qq - sudo apt-get update -qq
- sudo apt-get install -qq ruby-sass - sudo apt-get install -qq ruby-sass
install: install:
- pip install nose unittest2 mock --use-mirrors - pip install nose mock --use-mirrors
- if [[ $TRAVIS_PYTHON_VERSION == '3.2' ]]; then pip install --use-mirrors unittest2py3k; else pip install --use-mirrors unittest2; fi
- pip install . --use-mirrors - pip install . --use-mirrors
- pip install Markdown - pip install Markdown
- pip install webassets - pip install webassets

View file

@ -1,8 +1,9 @@
# Tests # Tests
unittest2
mock mock
# Optional Packages # Optional Packages
Markdown Markdown
BeautifulSoup BeautifulSoup4
lxml
typogrify typogrify
webassets webassets

View file

@ -4,7 +4,7 @@ Release history
3.2 (XXXX-XX-XX) 3.2 (XXXX-XX-XX)
================ ================
* [...] * Support for Python 3!
3.1 (2012-12-04) 3.1 (2012-12-04)
================ ================

View file

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os import sys, os
sys.path.append(os.path.abspath('..')) sys.path.append(os.path.abspath('..'))
@ -10,8 +11,8 @@ templates_path = ['_templates']
extensions = ['sphinx.ext.autodoc',] extensions = ['sphinx.ext.autodoc',]
source_suffix = '.rst' source_suffix = '.rst'
master_doc = 'index' master_doc = 'index'
project = u'Pelican' project = 'Pelican'
copyright = u'2010, Alexis Metaireau and contributors' copyright = '2010, Alexis Metaireau and contributors'
exclude_patterns = ['_build'] exclude_patterns = ['_build']
version = __version__ version = __version__
release = __major__ release = __major__
@ -34,16 +35,16 @@ htmlhelp_basename = 'Pelicandoc'
# -- Options for LaTeX output -------------------------------------------------- # -- Options for LaTeX output --------------------------------------------------
latex_documents = [ latex_documents = [
('index', 'Pelican.tex', u'Pelican Documentation', ('index', 'Pelican.tex', 'Pelican Documentation',
u'Alexis Métaireau', 'manual'), 'Alexis Métaireau', 'manual'),
] ]
# -- Options for manual page output -------------------------------------------- # -- Options for manual page output --------------------------------------------
man_pages = [ man_pages = [
('index', 'pelican', u'pelican documentation', ('index', 'pelican', 'pelican documentation',
[u'Alexis Métaireau'], 1), ['Alexis Métaireau'], 1),
('pelican-themes', 'pelican-themes', u'A theme manager for Pelican', ('pelican-themes', 'pelican-themes', 'A theme manager for Pelican',
[u'Mickaël Raybaud'], 1), ['Mickaël Raybaud'], 1),
('themes', 'pelican-theming', u'How to create themes for Pelican', ('themes', 'pelican-theming', 'How to create themes for Pelican',
[u'The Pelican contributors'], 1) ['The Pelican contributors'], 1)
] ]

View file

@ -1,3 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import six
import os import os
import re import re
import sys import sys
@ -55,7 +59,7 @@ class Pelican(object):
self.plugins = self.settings['PLUGINS'] self.plugins = self.settings['PLUGINS']
for plugin in self.plugins: for plugin in self.plugins:
# if it's a string, then import it # if it's a string, then import it
if isinstance(plugin, basestring): if isinstance(plugin, six.string_types):
logger.debug("Loading plugin `{0}' ...".format(plugin)) logger.debug("Loading plugin `{0}' ...".format(plugin))
plugin = __import__(plugin, globals(), locals(), 'module') plugin = __import__(plugin, globals(), locals(), 'module')
@ -265,7 +269,7 @@ def get_instance(args):
settings = read_settings(args.settings, override=get_config(args)) settings = read_settings(args.settings, override=get_config(args))
cls = settings.get('PELICAN_CLASS') cls = settings.get('PELICAN_CLASS')
if isinstance(cls, basestring): if isinstance(cls, six.string_types):
module, cls_name = cls.rsplit('.', 1) module, cls_name = cls.rsplit('.', 1)
module = __import__(module) module = __import__(module)
cls = getattr(module, cls_name) cls = getattr(module, cls_name)
@ -311,15 +315,15 @@ def main():
"Nothing to generate.") "Nothing to generate.")
files_found_error = False files_found_error = False
time.sleep(1) # sleep to avoid cpu load time.sleep(1) # sleep to avoid cpu load
except Exception, e: except Exception as e:
logger.warning( logger.warning(
"Caught exception \"{}\". Reloading.".format(e) "Caught exception \"{}\". Reloading.".format(e)
) )
continue continue
else: else:
pelican.run() pelican.run()
except Exception, e: except Exception as e:
logger.critical(unicode(e)) logger.critical(e)
if (args.verbosity == logging.DEBUG): if (args.verbosity == logging.DEBUG):
raise raise

View file

@ -1,4 +1,7 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import six
import copy import copy
import locale import locale
import logging import logging
@ -11,8 +14,10 @@ from sys import platform, stdin
from pelican.settings import _DEFAULT_CONFIG from pelican.settings import _DEFAULT_CONFIG
from pelican.utils import slugify, truncate_html_words, memoized from pelican.utils import (slugify, truncate_html_words, memoized,
python_2_unicode_compatible)
from pelican import signals from pelican import signals
import pelican.utils
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -85,13 +90,8 @@ class Page(object):
self.date_format = self.date_format[1] self.date_format = self.date_format[1]
if hasattr(self, 'date'): if hasattr(self, 'date'):
encoded_date = self.date.strftime( self.locale_date = pelican.utils.strftime(self.date,
self.date_format.encode('ascii', 'xmlcharrefreplace')) self.date_format)
if platform == 'win32':
self.locale_date = encoded_date.decode(stdin.encoding)
else:
self.locale_date = encoded_date.decode('utf')
# manage status # manage status
if not hasattr(self, 'status'): if not hasattr(self, 'status'):
@ -167,7 +167,7 @@ class Page(object):
origin = '/'.join((siteurl, origin = '/'.join((siteurl,
self._context['filenames'][value].url)) self._context['filenames'][value].url))
else: else:
logger.warning(u"Unable to find {fn}, skipping url" logger.warning("Unable to find {fn}, skipping url"
" replacement".format(fn=value)) " replacement".format(fn=value))
return m.group('markup') + m.group('quote') + origin \ return m.group('markup') + m.group('quote') + origin \
@ -243,10 +243,10 @@ class Article(Page):
class Quote(Page): class Quote(Page):
base_properties = ('author', 'date') base_properties = ('author', 'date')
@python_2_unicode_compatible
class URLWrapper(object): class URLWrapper(object):
def __init__(self, name, settings): def __init__(self, name, settings):
self.name = unicode(name) self.name = name
self.slug = slugify(self.name) self.slug = slugify(self.name)
self.settings = settings self.settings = settings
@ -257,12 +257,9 @@ class URLWrapper(object):
return hash(self.name) return hash(self.name)
def __eq__(self, other): def __eq__(self, other):
return self.name == unicode(other) return self.name == other
def __str__(self): def __str__(self):
return str(self.name.encode('utf-8', 'replace'))
def __unicode__(self):
return self.name return self.name
def _from_settings(self, key, get_page_name=False): def _from_settings(self, key, get_page_name=False):
@ -272,14 +269,14 @@ class URLWrapper(object):
Useful for pagination.""" Useful for pagination."""
setting = "%s_%s" % (self.__class__.__name__.upper(), key) setting = "%s_%s" % (self.__class__.__name__.upper(), key)
value = self.settings[setting] value = self.settings[setting]
if not isinstance(value, basestring): if not isinstance(value, six.string_types):
logger.warning(u'%s is set to %s' % (setting, value)) logger.warning('%s is set to %s' % (setting, value))
return value return value
else: else:
if get_page_name: if get_page_name:
return unicode(os.path.splitext(value)[0]).format(**self.as_dict()) return os.path.splitext(value)[0].format(**self.as_dict())
else: else:
return unicode(value).format(**self.as_dict()) return value.format(**self.as_dict())
page_name = property(functools.partial(_from_settings, key='URL', get_page_name=True)) page_name = property(functools.partial(_from_settings, key='URL', get_page_name=True))
url = property(functools.partial(_from_settings, key='URL')) url = property(functools.partial(_from_settings, key='URL'))
@ -292,13 +289,14 @@ class Category(URLWrapper):
class Tag(URLWrapper): class Tag(URLWrapper):
def __init__(self, name, *args, **kwargs): def __init__(self, name, *args, **kwargs):
super(Tag, self).__init__(unicode.strip(name), *args, **kwargs) super(Tag, self).__init__(name.strip(), *args, **kwargs)
class Author(URLWrapper): class Author(URLWrapper):
pass pass
@python_2_unicode_compatible
class StaticContent(object): class StaticContent(object):
def __init__(self, src, dst=None, settings=None): def __init__(self, src, dst=None, settings=None):
if not settings: if not settings:
@ -309,9 +307,6 @@ class StaticContent(object):
self.save_as = os.path.join(settings['OUTPUT_PATH'], self.url) self.save_as = os.path.join(settings['OUTPUT_PATH'], self.url)
def __str__(self): def __str__(self):
return str(self.filepath.encode('utf-8', 'replace'))
def __unicode__(self):
return self.filepath return self.filepath
@ -319,7 +314,7 @@ def is_valid_content(content, f):
try: try:
content.check_properties() content.check_properties()
return True return True
except NameError, e: except NameError as e:
logger.error(u"Skipping %s: impossible to find informations about" logger.error("Skipping %s: impossible to find informations about"
"'%s'" % (f, e)) "'%s'" % (f, e))
return False return False

View file

@ -1,10 +1,11 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os import os
import math import math
import random import random
import logging import logging
import datetime import datetime
import subprocess
import shutil import shutil
from codecs import open from codecs import open
@ -119,7 +120,7 @@ class Generator(object):
for item in items: for item in items:
value = getattr(self, item) value = getattr(self, item)
if hasattr(value, 'items'): if hasattr(value, 'items'):
value = value.items() value = list(value.items())
self.context[item] = value self.context[item] = value
@ -133,8 +134,8 @@ class _FileLoader(BaseLoader):
if template != self.path or not os.path.exists(self.fullpath): if template != self.path or not os.path.exists(self.fullpath):
raise TemplateNotFound(template) raise TemplateNotFound(template)
mtime = os.path.getmtime(self.fullpath) mtime = os.path.getmtime(self.fullpath)
with file(self.fullpath) as f: with open(self.fullpath, 'r', encoding='utf-8') as f:
source = f.read().decode('utf-8') source = f.read()
return source, self.fullpath, \ return source, self.fullpath, \
lambda: mtime == os.path.getmtime(self.fullpath) lambda: mtime == os.path.getmtime(self.fullpath)
@ -323,8 +324,8 @@ class ArticlesGenerator(Generator):
try: try:
signals.article_generate_preread.send(self) signals.article_generate_preread.send(self)
content, metadata = read_file(f, settings=self.settings) content, metadata = read_file(f, settings=self.settings)
except Exception, e: except Exception as e:
logger.warning(u'Could not process %s\n%s' % (f, str(e))) logger.warning('Could not process %s\n%s' % (f, str(e)))
continue continue
# if no category is set, use the name of the path as a category # if no category is set, use the name of the path as a category
@ -333,8 +334,7 @@ class ArticlesGenerator(Generator):
if (self.settings['USE_FOLDER_AS_CATEGORY'] if (self.settings['USE_FOLDER_AS_CATEGORY']
and os.path.dirname(f) != article_path): and os.path.dirname(f) != article_path):
# if the article is in a subdirectory # if the article is in a subdirectory
category = os.path.basename(os.path.dirname(f))\ category = os.path.basename(os.path.dirname(f))
.decode('utf-8')
else: else:
# if the article is not in a subdirectory # if the article is not in a subdirectory
category = self.settings['DEFAULT_CATEGORY'] category = self.settings['DEFAULT_CATEGORY']
@ -366,8 +366,8 @@ class ArticlesGenerator(Generator):
elif article.status == "draft": elif article.status == "draft":
self.drafts.append(article) self.drafts.append(article)
else: else:
logger.warning(u"Unknown status %s for file %s, skipping it." % logger.warning("Unknown status %s for file %s, skipping it." %
(repr(unicode.encode(article.status, 'utf-8')), (repr(article.status),
repr(f))) repr(f)))
self.articles, self.translations = process_translations(all_articles) self.articles, self.translations = process_translations(all_articles)
@ -394,7 +394,7 @@ class ArticlesGenerator(Generator):
tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True) tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True)
tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')] tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')]
tags = map(itemgetter(1), tag_cloud) tags = list(map(itemgetter(1), tag_cloud))
if tags: if tags:
max_count = max(tags) max_count = max(tags)
steps = self.settings.get('TAG_CLOUD_STEPS') steps = self.settings.get('TAG_CLOUD_STEPS')
@ -450,8 +450,8 @@ class PagesGenerator(Generator):
exclude=self.settings['PAGE_EXCLUDES']): exclude=self.settings['PAGE_EXCLUDES']):
try: try:
content, metadata = read_file(f, settings=self.settings) content, metadata = read_file(f, settings=self.settings)
except Exception, e: except Exception as e:
logger.warning(u'Could not process %s\n%s' % (f, str(e))) logger.warning('Could not process %s\n%s' % (f, str(e)))
continue continue
signals.pages_generate_context.send(self, metadata=metadata) signals.pages_generate_context.send(self, metadata=metadata)
page = Page(content, metadata, settings=self.settings, page = Page(content, metadata, settings=self.settings,
@ -466,8 +466,8 @@ class PagesGenerator(Generator):
elif page.status == "hidden": elif page.status == "hidden":
hidden_pages.append(page) hidden_pages.append(page)
else: else:
logger.warning(u"Unknown status %s for file %s, skipping it." % logger.warning("Unknown status %s for file %s, skipping it." %
(repr(unicode.encode(page.status, 'utf-8')), (repr(page.status),
repr(f))) repr(f)))
self.pages, self.translations = process_translations(all_pages) self.pages, self.translations = process_translations(all_pages)
@ -550,7 +550,7 @@ class PdfGenerator(Generator):
# print "Generating pdf for", obj.filename, " in ", output_pdf # print "Generating pdf for", obj.filename, " in ", output_pdf
with open(obj.filename) as f: with open(obj.filename) as f:
self.pdfcreator.createPdf(text=f.read(), output=output_pdf) self.pdfcreator.createPdf(text=f.read(), output=output_pdf)
logger.info(u' [ok] writing %s' % output_pdf) logger.info(' [ok] writing %s' % output_pdf)
def generate_context(self): def generate_context(self):
pass pass
@ -558,7 +558,7 @@ class PdfGenerator(Generator):
def generate_output(self, writer=None): def generate_output(self, writer=None):
# we don't use the writer passed as argument here # we don't use the writer passed as argument here
# since we write our own files # since we write our own files
logger.info(u' Generating PDF files...') logger.info(' Generating PDF files...')
pdf_path = os.path.join(self.output_path, 'pdf') pdf_path = os.path.join(self.output_path, 'pdf')
if not os.path.exists(pdf_path): if not os.path.exists(pdf_path):
try: try:
@ -583,6 +583,6 @@ class SourceFileGenerator(Generator):
copy('', obj.filename, dest) copy('', obj.filename, dest)
def generate_output(self, writer=None): def generate_output(self, writer=None):
logger.info(u' Generating source files...') logger.info(' Generating source files...')
for object in chain(self.context['articles'], self.context['pages']): for object in chain(self.context['articles'], self.context['pages']):
self._create_source(object, self.output_path) self._create_source(object, self.output_path)

View file

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
__all__ = [ __all__ = [
'init' 'init'
] ]
@ -9,7 +12,7 @@ import logging
from logging import Formatter, getLogger, StreamHandler, DEBUG from logging import Formatter, getLogger, StreamHandler, DEBUG
RESET_TERM = u'\033[0;m' RESET_TERM = '\033[0;m'
COLOR_CODES = { COLOR_CODES = {
'red': 31, 'red': 31,
@ -24,37 +27,38 @@ COLOR_CODES = {
def ansi(color, text): def ansi(color, text):
"""Wrap text in an ansi escape sequence""" """Wrap text in an ansi escape sequence"""
code = COLOR_CODES[color] code = COLOR_CODES[color]
return u'\033[1;{0}m{1}{2}'.format(code, text, RESET_TERM) return '\033[1;{0}m{1}{2}'.format(code, text, RESET_TERM)
class ANSIFormatter(Formatter): class ANSIFormatter(Formatter):
""" """
Convert a `logging.LogReport' object into colored text, using ANSI escape sequences. Convert a `logging.LogRecord' object into colored text, using ANSI escape sequences.
""" """
## colors: ## colors:
def format(self, record): def format(self, record):
if record.levelname is 'INFO': msg = str(record.msg)
return ansi('cyan', '-> ') + unicode(record.msg) if record.levelname == 'INFO':
elif record.levelname is 'WARNING': return ansi('cyan', '-> ') + msg
return ansi('yellow', record.levelname) + ': ' + unicode(record.msg) elif record.levelname == 'WARNING':
elif record.levelname is 'ERROR': return ansi('yellow', record.levelname) + ': ' + msg
return ansi('red', record.levelname) + ': ' + unicode(record.msg) elif record.levelname == 'ERROR':
elif record.levelname is 'CRITICAL': return ansi('red', record.levelname) + ': ' + msg
return ansi('bgred', record.levelname) + ': ' + unicode(record.msg) elif record.levelname == 'CRITICAL':
elif record.levelname is 'DEBUG': return ansi('bgred', record.levelname) + ': ' + msg
return ansi('bggrey', record.levelname) + ': ' + unicode(record.msg) elif record.levelname == 'DEBUG':
return ansi('bggrey', record.levelname) + ': ' + msg
else: else:
return ansi('white', record.levelname) + ': ' + unicode(record.msg) return ansi('white', record.levelname) + ': ' + msg
class TextFormatter(Formatter): class TextFormatter(Formatter):
""" """
Convert a `logging.LogReport' object into text. Convert a `logging.LogRecord' object into text.
""" """
def format(self, record): def format(self, record):
if not record.levelname or record.levelname is 'INFO': if not record.levelname or record.levelname == 'INFO':
return record.msg return record.msg
else: else:
return record.levelname + ': ' + record.msg return record.levelname + ': ' + record.msg

View file

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
# From django.core.paginator # From django.core.paginator
from math import ceil from math import ceil
@ -37,7 +40,7 @@ class Paginator(object):
Returns a 1-based range of pages for iterating through within Returns a 1-based range of pages for iterating through within
a template for loop. a template for loop.
""" """
return range(1, self.num_pages + 1) return list(range(1, self.num_pages + 1))
page_range = property(_get_page_range) page_range = property(_get_page_range)

View file

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals
""" """
Asset management plugin for Pelican Asset management plugin for Pelican
=================================== ===================================

View file

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
""" """
Copyright (c) Marco Milanesi <kpanic@gnufunk.org> Copyright (c) Marco Milanesi <kpanic@gnufunk.org>

View file

@ -68,7 +68,7 @@ def create_gzip_file(filepath):
logger.debug('Compressing: %s' % filepath) logger.debug('Compressing: %s' % filepath)
compressed = gzip.open(compressed_path, 'wb') compressed = gzip.open(compressed_path, 'wb')
compressed.writelines(uncompressed) compressed.writelines(uncompressed)
except Exception, ex: except Exception as ex:
logger.critical('Gzip compression failed: %s' % ex) logger.critical('Gzip compression failed: %s' % ex)
finally: finally:
compressed.close() compressed.close()

View file

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from docutils import nodes from docutils import nodes
from docutils.parsers.rst import directives, Directive from docutils.parsers.rst import directives, Directive
from pelican import log
""" """
HTML tags for reStructuredText HTML tags for reStructuredText
@ -52,7 +53,7 @@ class RawHtml(Directive):
has_content = True has_content = True
def run(self): def run(self):
html = u' '.join(self.content) html = ' '.join(self.content)
node = nodes.raw('', html, format='html') node = nodes.raw('', html, format='html')
return [node] return [node]

View file

@ -1,7 +1,9 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from pelican import signals from pelican import signals
def test(sender): def test(sender):
print "%s initialized !!" % sender print("%s initialized !!" % sender)
def register(): def register():
signals.initialized.connect(test) signals.initialized.connect(test)

View file

@ -41,8 +41,8 @@ def add_related_posts(generator, metadata):
if len(related_posts) < 1: if len(related_posts) < 1:
return return
relation_score = dict(zip(set(related_posts), map(related_posts.count, relation_score = dict(list(zip(set(related_posts), list(map(related_posts.count,
set(related_posts)))) set(related_posts))))))
ranked_related = sorted(relation_score, key=relation_score.get) ranked_related = sorted(relation_score, key=relation_score.get)
metadata["related_posts"] = ranked_related[:5] metadata["related_posts"] = ranked_related[:5]

View file

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections import collections
import os.path import os.path
@ -7,19 +10,19 @@ from codecs import open
from pelican import signals, contents from pelican import signals, contents
TXT_HEADER = u"""{0}/index.html TXT_HEADER = """{0}/index.html
{0}/archives.html {0}/archives.html
{0}/tags.html {0}/tags.html
{0}/categories.html {0}/categories.html
""" """
XML_HEADER = u"""<?xml version="1.0" encoding="utf-8"?> XML_HEADER = """<?xml version="1.0" encoding="utf-8"?>
<urlset xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" <urlset xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd" xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd"
xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
""" """
XML_URL = u""" XML_URL = """
<url> <url>
<loc>{0}/{1}</loc> <loc>{0}/{1}</loc>
<lastmod>{2}</lastmod> <lastmod>{2}</lastmod>
@ -28,7 +31,7 @@ XML_URL = u"""
</url> </url>
""" """
XML_FOOTER = u""" XML_FOOTER = """
</urlset> </urlset>
""" """
@ -86,7 +89,7 @@ class SitemapGenerator(object):
'yearly', 'never') 'yearly', 'never')
if isinstance(pris, dict): if isinstance(pris, dict):
for k, v in pris.iteritems(): for k, v in pris.items():
if k in valid_keys and not isinstance(v, (int, float)): if k in valid_keys and not isinstance(v, (int, float)):
default = self.priorities[k] default = self.priorities[k]
warning("sitemap plugin: priorities must be numbers") warning("sitemap plugin: priorities must be numbers")
@ -99,7 +102,7 @@ class SitemapGenerator(object):
warning("sitemap plugin: using the default values") warning("sitemap plugin: using the default values")
if isinstance(chfreqs, dict): if isinstance(chfreqs, dict):
for k, v in chfreqs.iteritems(): for k, v in chfreqs.items():
if k in valid_keys and v not in valid_chfreqs: if k in valid_keys and v not in valid_chfreqs:
default = self.changefreqs[k] default = self.changefreqs[k]
warning("sitemap plugin: invalid changefreq `{0}'".format(v)) warning("sitemap plugin: invalid changefreq `{0}'".format(v))

View file

@ -1,4 +1,7 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import six
import os import os
import re import re
try: try:
@ -20,15 +23,16 @@ try:
asciidoc = True asciidoc = True
except ImportError: except ImportError:
asciidoc = False asciidoc = False
import re
from pelican.contents import Category, Tag, Author from pelican.contents import Category, Tag, Author
from pelican.utils import get_date, pelican_open from pelican.utils import get_date, pelican_open
_METADATA_PROCESSORS = { _METADATA_PROCESSORS = {
'tags': lambda x, y: [Tag(tag, y) for tag in unicode(x).split(',')], 'tags': lambda x, y: [Tag(tag, y) for tag in x.split(',')],
'date': lambda x, y: get_date(x), 'date': lambda x, y: get_date(x),
'status': lambda x, y: unicode.strip(x), 'status': lambda x, y: x.strip(),
'category': Category, 'category': Category,
'author': Author, 'author': Author,
} }
@ -242,7 +246,7 @@ def read_file(filename, fmt=None, settings=None):
if filename_metadata: if filename_metadata:
match = re.match(filename_metadata, base) match = re.match(filename_metadata, base)
if match: if match:
for k, v in match.groupdict().iteritems(): for k, v in match.groupdict().items():
if k not in metadata: if k not in metadata:
k = k.lower() # metadata must be lowercase k = k.lower() # metadata must be lowercase
metadata[k] = reader.process_metadata(k, v) metadata[k] = reader.process_metadata(k, v)

View file

@ -1,4 +1,6 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from docutils import nodes, utils from docutils import nodes, utils
from docutils.parsers.rst import directives, roles, Directive from docutils.parsers.rst import directives, roles, Directive
from pygments.formatters import HtmlFormatter from pygments.formatters import HtmlFormatter
@ -32,7 +34,7 @@ class Pygments(Directive):
# take an arbitrary option if more than one is given # take an arbitrary option if more than one is given
formatter = self.options and VARIANTS[self.options.keys()[0]] \ formatter = self.options and VARIANTS[self.options.keys()[0]] \
or DEFAULT or DEFAULT
parsed = highlight(u'\n'.join(self.content), lexer, formatter) parsed = highlight('\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')] return [nodes.raw('', parsed, format='html')]
directives.register_directive('code-block', Pygments) directives.register_directive('code-block', Pygments)

20
pelican/server.py Normal file
View file

@ -0,0 +1,20 @@
from __future__ import print_function
try:
import SimpleHTTPServer as srvmod
except ImportError:
import http.server as srvmod
try:
import SocketServer as socketserver
except ImportError:
import socketserver
PORT = 8000
Handler = srvmod.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", PORT), Handler)
print("serving at port", PORT)
httpd.serve_forever()

View file

@ -1,11 +1,13 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import six
import copy import copy
import imp import imp
import inspect import inspect
import os import os
import locale import locale
import logging import logging
import re
from os.path import isabs from os.path import isabs
@ -54,8 +56,8 @@ _DEFAULT_CONFIG = {'PATH': '.',
'CATEGORY_SAVE_AS': 'category/{slug}.html', 'CATEGORY_SAVE_AS': 'category/{slug}.html',
'TAG_URL': 'tag/{slug}.html', 'TAG_URL': 'tag/{slug}.html',
'TAG_SAVE_AS': 'tag/{slug}.html', 'TAG_SAVE_AS': 'tag/{slug}.html',
'AUTHOR_URL': u'author/{slug}.html', 'AUTHOR_URL': 'author/{slug}.html',
'AUTHOR_SAVE_AS': u'author/{slug}.html', 'AUTHOR_SAVE_AS': 'author/{slug}.html',
'RELATIVE_URLS': True, 'RELATIVE_URLS': True,
'DEFAULT_LANG': 'en', 'DEFAULT_LANG': 'en',
'TAG_CLOUD_STEPS': 4, 'TAG_CLOUD_STEPS': 4,
@ -146,7 +148,7 @@ def configure_settings(settings):
# if locales is not a list, make it one # if locales is not a list, make it one
locales = settings['LOCALE'] locales = settings['LOCALE']
if isinstance(locales, basestring): if isinstance(locales, six.string_types):
locales = [locales] locales = [locales]
# try to set the different locales, fallback on the default. # try to set the different locales, fallback on the default.
@ -155,7 +157,7 @@ def configure_settings(settings):
for locale_ in locales: for locale_ in locales:
try: try:
locale.setlocale(locale.LC_ALL, locale_) locale.setlocale(locale.LC_ALL, str(locale_))
break # break if it is successful break # break if it is successful
except locale.Error: except locale.Error:
pass pass
@ -200,14 +202,14 @@ def configure_settings(settings):
"of the Webassets plugin") "of the Webassets plugin")
if 'OUTPUT_SOURCES_EXTENSION' in settings: if 'OUTPUT_SOURCES_EXTENSION' in settings:
if not isinstance(settings['OUTPUT_SOURCES_EXTENSION'], str): if not isinstance(settings['OUTPUT_SOURCES_EXTENSION'], six.string_types):
settings['OUTPUT_SOURCES_EXTENSION'] = _DEFAULT_CONFIG['OUTPUT_SOURCES_EXTENSION'] settings['OUTPUT_SOURCES_EXTENSION'] = _DEFAULT_CONFIG['OUTPUT_SOURCES_EXTENSION']
logger.warn("Detected misconfiguration with OUTPUT_SOURCES_EXTENSION." logger.warn("Detected misconfiguration with OUTPUT_SOURCES_EXTENSION."
" falling back to the default extension " + " falling back to the default extension " +
_DEFAULT_CONFIG['OUTPUT_SOURCES_EXTENSION']) _DEFAULT_CONFIG['OUTPUT_SOURCES_EXTENSION'])
filename_metadata = settings.get('FILENAME_METADATA') filename_metadata = settings.get('FILENAME_METADATA')
if filename_metadata and not isinstance(filename_metadata, basestring): if filename_metadata and not isinstance(filename_metadata, six.string_types):
logger.error("Detected misconfiguration with FILENAME_METADATA" logger.error("Detected misconfiguration with FILENAME_METADATA"
" setting (must be string or compiled pattern), falling" " setting (must be string or compiled pattern), falling"
"back to the default") "back to the default")

View file

@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from blinker import signal from blinker import signal
initialized = signal('pelican_initialized') initialized = signal('pelican_initialized')

View file

@ -1,7 +1,12 @@
#!/usr/bin/env python #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import argparse import argparse
from HTMLParser import HTMLParser try:
from html.parser import HTMLParser
except ImportError:
from HTMLParser import HTMLParser
import os import os
import subprocess import subprocess
import sys import sys
@ -15,14 +20,14 @@ from pelican.utils import slugify
def wp2fields(xml): def wp2fields(xml):
"""Opens a wordpress XML file, and yield pelican fields""" """Opens a wordpress XML file, and yield pelican fields"""
try: try:
from BeautifulSoup import BeautifulStoneSoup from bs4 import BeautifulSoup
except ImportError: except ImportError:
error = ('Missing dependency ' error = ('Missing dependency '
'"BeautifulSoup" required to import Wordpress XML files.') '"BeautifulSoup4" and "lxml" required to import Wordpress XML files.')
sys.exit(error) sys.exit(error)
xmlfile = open(xml, encoding='utf-8').read() xmlfile = open(xml, encoding='utf-8').read()
soup = BeautifulStoneSoup(xmlfile) soup = BeautifulSoup(xmlfile, "xml")
items = soup.rss.channel.findAll('item') items = soup.rss.channel.findAll('item')
for item in items: for item in items:
@ -54,10 +59,10 @@ def wp2fields(xml):
def dc2fields(file): def dc2fields(file):
"""Opens a Dotclear export file, and yield pelican fields""" """Opens a Dotclear export file, and yield pelican fields"""
try: try:
from BeautifulSoup import BeautifulStoneSoup from bs4 import BeautifulSoup
except ImportError: except ImportError:
error = ('Missing dependency ' error = ('Missing dependency '
'"BeautifulSoup" required to import Dotclear files.') '"BeautifulSoup4" and "lxml" required to import Dotclear files.')
sys.exit(error) sys.exit(error)
@ -142,13 +147,27 @@ def dc2fields(file):
if len(tag) > 1: if len(tag) > 1:
if int(tag[:1]) == 1: if int(tag[:1]) == 1:
newtag = tag.split('"')[1] newtag = tag.split('"')[1]
tags.append(unicode(BeautifulStoneSoup(newtag,convertEntities=BeautifulStoneSoup.HTML_ENTITIES ))) tags.append(
BeautifulSoup(
newtag
, "xml"
)
# bs4 always outputs UTF-8
.decode('utf-8')
)
else: else:
i=1 i=1
j=1 j=1
while(i <= int(tag[:1])): while(i <= int(tag[:1])):
newtag = tag.split('"')[j].replace('\\','') newtag = tag.split('"')[j].replace('\\','')
tags.append(unicode(BeautifulStoneSoup(newtag,convertEntities=BeautifulStoneSoup.HTML_ENTITIES ))) tags.append(
BeautifulSoup(
newtag
, "xml"
)
# bs4 always outputs UTF-8
.decode('utf-8')
)
i=i+1 i=i+1
if j < int(tag[:1])*2: if j < int(tag[:1])*2:
j=j+2 j=j+2
@ -244,7 +263,7 @@ def fields2pelican(fields, out_markup, output_path, dircat=False, strip_raw=Fals
# Replace newlines with paragraphs wrapped with <p> so # Replace newlines with paragraphs wrapped with <p> so
# HTML is valid before conversion # HTML is valid before conversion
paragraphs = content.splitlines() paragraphs = content.splitlines()
paragraphs = [u'<p>{0}</p>'.format(p) for p in paragraphs] paragraphs = ['<p>{0}</p>'.format(p) for p in paragraphs]
new_content = ''.join(paragraphs) new_content = ''.join(paragraphs)
fp.write(new_content) fp.write(new_content)
@ -264,7 +283,7 @@ def fields2pelican(fields, out_markup, output_path, dircat=False, strip_raw=Fals
elif rc > 0: elif rc > 0:
error = "Please, check your Pandoc installation." error = "Please, check your Pandoc installation."
exit(error) exit(error)
except OSError, e: except OSError as e:
error = "Pandoc execution failed: %s" % e error = "Pandoc execution failed: %s" % e
exit(error) exit(error)
@ -284,7 +303,7 @@ def fields2pelican(fields, out_markup, output_path, dircat=False, strip_raw=Fals
def main(): def main():
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description="Transform feed, Wordpress or Dotclear files to reST (rst) " description="Transform feed, Wordpress or Dotclear files to reST (rst) "
"or Markdown (md) files. Be sure to have pandoc installed.", "or Markdown (md) files. Be sure to have pandoc installed",
formatter_class=argparse.ArgumentDefaultsHelpFormatter) formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(dest='input', help='The input file to read') parser.add_argument(dest='input', help='The input file to read')
@ -304,10 +323,10 @@ def main():
help="Strip raw HTML code that can't be converted to " help="Strip raw HTML code that can't be converted to "
"markup such as flash embeds or iframes (wordpress import only)") "markup such as flash embeds or iframes (wordpress import only)")
parser.add_argument('--disable-slugs', action='store_true', parser.add_argument('--disable-slugs', action='store_true',
dest='disable_slugs', dest='disable_slugs',
help='Disable storing slugs from imported posts within output. ' help='Disable storing slugs from imported posts within output. '
'With this disabled, your Pelican URLs may not be consistent ' 'With this disabled, your Pelican URLs may not be consistent '
'with your original posts.') 'with your original posts.')
args = parser.parse_args() args = parser.parse_args()
@ -339,4 +358,4 @@ def main():
fields2pelican(fields, args.markup, args.output, fields2pelican(fields, args.markup, args.output,
dircat=args.dircat or False, dircat=args.dircat or False,
strip_raw=args.strip_raw or False, strip_raw=args.strip_raw or False,
disable_slugs=args.disable_slugs or False) strip_slugs=args.disable_slugs or False)

View file

@ -1,5 +1,8 @@
#!/usr/bin/env python #!/usr/bin/env python
# -*- coding: utf-8 -*- #
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import six
import os import os
import string import string
@ -29,11 +32,22 @@ CONF = {
'lang': 'en' 'lang': 'en'
} }
def _input_compat(prompt):
if six.PY3:
r = input(prompt)
else:
r = raw_input(prompt).decode('utf-8')
return r
if six.PY3:
str_compat = str
else:
str_compat = unicode
def decoding_strings(f): def decoding_strings(f):
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
out = f(*args, **kwargs) out = f(*args, **kwargs)
if isinstance(out, basestring): if isinstance(out, six.string_types):
# todo: make encoding configurable? # todo: make encoding configurable?
return out.decode(sys.stdin.encoding) return out.decode(sys.stdin.encoding)
return out return out
@ -55,14 +69,14 @@ def get_template(name, as_encoding='utf-8'):
@decoding_strings @decoding_strings
def ask(question, answer=str, default=None, l=None): def ask(question, answer=str_compat, default=None, l=None):
if answer == str: if answer == str_compat:
r = '' r = ''
while True: while True:
if default: if default:
r = raw_input('> {0} [{1}] '.format(question, default)) r = _input_compat('> {0} [{1}] '.format(question, default))
else: else:
r = raw_input('> {0} '.format(question, default)) r = _input_compat('> {0} '.format(question, default))
r = r.strip() r = r.strip()
@ -84,11 +98,11 @@ def ask(question, answer=str, default=None, l=None):
r = None r = None
while True: while True:
if default is True: if default is True:
r = raw_input('> {0} (Y/n) '.format(question)) r = _input_compat('> {0} (Y/n) '.format(question))
elif default is False: elif default is False:
r = raw_input('> {0} (y/N) '.format(question)) r = _input_compat('> {0} (y/N) '.format(question))
else: else:
r = raw_input('> {0} (y/n) '.format(question)) r = _input_compat('> {0} (y/n) '.format(question))
r = r.strip().lower() r = r.strip().lower()
@ -108,9 +122,9 @@ def ask(question, answer=str, default=None, l=None):
r = None r = None
while True: while True:
if default: if default:
r = raw_input('> {0} [{1}] '.format(question, default)) r = _input_compat('> {0} [{1}] '.format(question, default))
else: else:
r = raw_input('> {0} '.format(question)) r = _input_compat('> {0} '.format(question))
r = r.strip() r = r.strip()
@ -125,7 +139,7 @@ def ask(question, answer=str, default=None, l=None):
print('You must enter an integer') print('You must enter an integer')
return r return r
else: else:
raise NotImplemented('Argument `answer` must be str, bool, or integer') raise NotImplemented('Argument `answer` must be str_compat, bool, or integer')
def main(): def main():
@ -158,14 +172,14 @@ needed by Pelican.
print('Using project associated with current virtual environment.' print('Using project associated with current virtual environment.'
'Will save to:\n%s\n' % CONF['basedir']) 'Will save to:\n%s\n' % CONF['basedir'])
else: else:
CONF['basedir'] = os.path.abspath(ask('Where do you want to create your new web site?', answer=str, default=args.path)) CONF['basedir'] = os.path.abspath(ask('Where do you want to create your new web site?', answer=str_compat, default=args.path))
CONF['sitename'] = ask('What will be the title of this web site?', answer=str, default=args.title) CONF['sitename'] = ask('What will be the title of this web site?', answer=str_compat, default=args.title)
CONF['author'] = ask('Who will be the author of this web site?', answer=str, default=args.author) CONF['author'] = ask('Who will be the author of this web site?', answer=str_compat, default=args.author)
CONF['lang'] = ask('What will be the default language of this web site?', str, args.lang or CONF['lang'], 2) CONF['lang'] = ask('What will be the default language of this web site?', str_compat, args.lang or CONF['lang'], 2)
if ask('Do you want to specify a URL prefix? e.g., http://example.com ', answer=bool, default=True): if ask('Do you want to specify a URL prefix? e.g., http://example.com ', answer=bool, default=True):
CONF['siteurl'] = ask('What is your URL prefix? (see above example; no trailing slash)', str, CONF['siteurl']) CONF['siteurl'] = ask('What is your URL prefix? (see above example; no trailing slash)', str_compat, CONF['siteurl'])
CONF['with_pagination'] = ask('Do you want to enable article pagination?', bool, bool(CONF['default_pagination'])) CONF['with_pagination'] = ask('Do you want to enable article pagination?', bool, bool(CONF['default_pagination']))
@ -179,38 +193,38 @@ needed by Pelican.
if mkfile: if mkfile:
if ask('Do you want to upload your website using FTP?', answer=bool, default=False): if ask('Do you want to upload your website using FTP?', answer=bool, default=False):
CONF['ftp_host'] = ask('What is the hostname of your FTP server?', str, CONF['ftp_host']) CONF['ftp_host'] = ask('What is the hostname of your FTP server?', str_compat, CONF['ftp_host'])
CONF['ftp_user'] = ask('What is your username on that server?', str, CONF['ftp_user']) CONF['ftp_user'] = ask('What is your username on that server?', str_compat, CONF['ftp_user'])
CONF['ftp_target_dir'] = ask('Where do you want to put your web site on that server?', str, CONF['ftp_target_dir']) CONF['ftp_target_dir'] = ask('Where do you want to put your web site on that server?', str_compat, CONF['ftp_target_dir'])
if ask('Do you want to upload your website using SSH?', answer=bool, default=False): if ask('Do you want to upload your website using SSH?', answer=bool, default=False):
CONF['ssh_host'] = ask('What is the hostname of your SSH server?', str, CONF['ssh_host']) CONF['ssh_host'] = ask('What is the hostname of your SSH server?', str_compat, CONF['ssh_host'])
CONF['ssh_port'] = ask('What is the port of your SSH server?', int, CONF['ssh_port']) CONF['ssh_port'] = ask('What is the port of your SSH server?', int, CONF['ssh_port'])
CONF['ssh_user'] = ask('What is your username on that server?', str, CONF['ssh_user']) CONF['ssh_user'] = ask('What is your username on that server?', str_compat, CONF['ssh_user'])
CONF['ssh_target_dir'] = ask('Where do you want to put your web site on that server?', str, CONF['ssh_target_dir']) CONF['ssh_target_dir'] = ask('Where do you want to put your web site on that server?', str_compat, CONF['ssh_target_dir'])
if ask('Do you want to upload your website using Dropbox?', answer=bool, default=False): if ask('Do you want to upload your website using Dropbox?', answer=bool, default=False):
CONF['dropbox_dir'] = ask('Where is your Dropbox directory?', str, CONF['dropbox_dir']) CONF['dropbox_dir'] = ask('Where is your Dropbox directory?', str_compat, CONF['dropbox_dir'])
try: try:
os.makedirs(os.path.join(CONF['basedir'], 'content')) os.makedirs(os.path.join(CONF['basedir'], 'content'))
except OSError, e: except OSError as e:
print('Error: {0}'.format(e)) print('Error: {0}'.format(e))
try: try:
os.makedirs(os.path.join(CONF['basedir'], 'output')) os.makedirs(os.path.join(CONF['basedir'], 'output'))
except OSError, e: except OSError as e:
print('Error: {0}'.format(e)) print('Error: {0}'.format(e))
try: try:
with codecs.open(os.path.join(CONF['basedir'], 'pelicanconf.py'), 'w', 'utf-8') as fd: with codecs.open(os.path.join(CONF['basedir'], 'pelicanconf.py'), 'w', 'utf-8') as fd:
conf_python = dict() conf_python = dict()
for key, value in CONF.iteritems(): for key, value in CONF.items():
conf_python[key] = repr(value) conf_python[key] = repr(value)
for line in get_template('pelicanconf.py'): for line in get_template('pelicanconf.py'):
template = string.Template(line) template = string.Template(line)
fd.write(template.safe_substitute(conf_python)) fd.write(template.safe_substitute(conf_python))
fd.close() fd.close()
except OSError, e: except OSError as e:
print('Error: {0}'.format(e)) print('Error: {0}'.format(e))
try: try:
@ -219,7 +233,7 @@ needed by Pelican.
template = string.Template(line) template = string.Template(line)
fd.write(template.safe_substitute(CONF)) fd.write(template.safe_substitute(CONF))
fd.close() fd.close()
except OSError, e: except OSError as e:
print('Error: {0}'.format(e)) print('Error: {0}'.format(e))
if mkfile: if mkfile:
@ -229,13 +243,13 @@ needed by Pelican.
template = string.Template(line) template = string.Template(line)
fd.write(template.safe_substitute(CONF)) fd.write(template.safe_substitute(CONF))
fd.close() fd.close()
except OSError, e: except OSError as e:
print('Error: {0}'.format(e)) print('Error: {0}'.format(e))
if develop: if develop:
conf_shell = dict() conf_shell = dict()
for key, value in CONF.iteritems(): for key, value in CONF.items():
if isinstance(value, basestring) and ' ' in value: if isinstance(value, six.string_types) and ' ' in value:
value = '"' + value.replace('"', '\\"') + '"' value = '"' + value.replace('"', '\\"') + '"'
conf_shell[key] = value conf_shell[key] = value
try: try:
@ -244,8 +258,8 @@ needed by Pelican.
template = string.Template(line) template = string.Template(line)
fd.write(template.safe_substitute(conf_shell)) fd.write(template.safe_substitute(conf_shell))
fd.close() fd.close()
os.chmod((os.path.join(CONF['basedir'], 'develop_server.sh')), 0755) os.chmod((os.path.join(CONF['basedir'], 'develop_server.sh')), 493) # mode 0o755
except OSError, e: except OSError as e:
print('Error: {0}'.format(e)) print('Error: {0}'.format(e))
print('Done. Your new project is available at %s' % CONF['basedir']) print('Done. Your new project is available at %s' % CONF['basedir'])

View file

@ -1,5 +1,8 @@
#!/usr/bin/env python #!/usr/bin/env python
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import six
import argparse import argparse
import os import os
@ -28,7 +31,7 @@ _BUILTIN_THEMES = ['simple', 'notmyidea']
def err(msg, die=None): def err(msg, die=None):
"""Print an error message and exits if an exit code is given""" """Print an error message and exits if an exit code is given"""
sys.stderr.write(str(msg) + '\n') sys.stderr.write(msg + '\n')
if die: if die:
sys.exit((die if type(die) is int else 1)) sys.exit((die if type(die) is int else 1))
@ -186,13 +189,13 @@ def install(path, v=False, u=False):
for root, dirs, files in os.walk(theme_path): for root, dirs, files in os.walk(theme_path):
for d in dirs: for d in dirs:
dname = os.path.join(root, d) dname = os.path.join(root, d)
os.chmod(dname, 0755) os.chmod(dname, 493) # 0o755
for f in files: for f in files:
fname = os.path.join(root, f) fname = os.path.join(root, f)
os.chmod(fname, 0644) os.chmod(fname, 420) # 0o644
except OSError, e: except OSError as e:
err("Cannot change permissions of files or directory in `{r}':\n{e}".format(r=theme_path, e=str(e)), die=False) err("Cannot change permissions of files or directory in `{r}':\n{e}".format(r=theme_path, e=str(e)), die=False)
except Exception, e: except Exception as e:
err("Cannot copy `{p}' to `{t}':\n{e}".format(p=path, t=theme_path, e=str(e))) err("Cannot copy `{p}' to `{t}':\n{e}".format(p=path, t=theme_path, e=str(e)))
@ -212,7 +215,7 @@ def symlink(path, v=False):
print("Linking `{p}' to `{t}' ...".format(p=path, t=theme_path)) print("Linking `{p}' to `{t}' ...".format(p=path, t=theme_path))
try: try:
os.symlink(path, theme_path) os.symlink(path, theme_path)
except Exception, e: except Exception as e:
err("Cannot link `{p}' to `{t}':\n{e}".format(p=path, t=theme_path, e=str(e))) err("Cannot link `{p}' to `{t}':\n{e}".format(p=path, t=theme_path, e=str(e)))
@ -233,7 +236,7 @@ def clean(v=False):
print('Removing {0}'.format(path)) print('Removing {0}'.format(path))
try: try:
os.remove(path) os.remove(path)
except OSError, e: except OSError as e:
print('Error: cannot remove {0}'.format(path)) print('Error: cannot remove {0}'.format(path))
else: else:
c+=1 c+=1

View file

@ -49,7 +49,7 @@ regenerate: clean
$$(PELICAN) -r $$(INPUTDIR) -o $$(OUTPUTDIR) -s $$(CONFFILE) $$(PELICANOPTS) $$(PELICAN) -r $$(INPUTDIR) -o $$(OUTPUTDIR) -s $$(CONFFILE) $$(PELICANOPTS)
serve: serve:
cd $$(OUTPUTDIR) && python -m SimpleHTTPServer cd $$(OUTPUTDIR) && python -m pelican.server
devserver: devserver:
$$(BASEDIR)/develop_server.sh restart $$(BASEDIR)/develop_server.sh restart

View file

@ -20,7 +20,7 @@ PELICAN_PID=$$BASEDIR/pelican.pid
function usage(){ function usage(){
echo "usage: $$0 (stop) (start) (restart)" echo "usage: $$0 (stop) (start) (restart)"
echo "This starts pelican in debug and reload mode and then launches" echo "This starts pelican in debug and reload mode and then launches"
echo "A SimpleHTTP server to help site development. It doesn't read" echo "A pelican.server to help site development. It doesn't read"
echo "your pelican options so you edit any paths in your Makefile" echo "your pelican options so you edit any paths in your Makefile"
echo "you will need to edit it as well" echo "you will need to edit it as well"
exit 3 exit 3
@ -31,14 +31,14 @@ function shut_down(){
PID=$$(cat $$SRV_PID) PID=$$(cat $$SRV_PID)
PROCESS=$$(ps -p $$PID | tail -n 1 | awk '{print $$4}') PROCESS=$$(ps -p $$PID | tail -n 1 | awk '{print $$4}')
if [[ $$PROCESS != "" ]]; then if [[ $$PROCESS != "" ]]; then
echo "Killing SimpleHTTPServer" echo "Killing pelican.server"
kill $$PID kill $$PID
else else
echo "Stale PID, deleting" echo "Stale PID, deleting"
fi fi
rm $$SRV_PID rm $$SRV_PID
else else
echo "SimpleHTTPServer PIDFile not found" echo "pelican.server PIDFile not found"
fi fi
if [[ -f $$PELICAN_PID ]]; then if [[ -f $$PELICAN_PID ]]; then
@ -57,15 +57,15 @@ function shut_down(){
} }
function start_up(){ function start_up(){
echo "Starting up Pelican and SimpleHTTPServer" echo "Starting up Pelican and pelican.server"
shift shift
$$PELICAN --debug --autoreload -r $$INPUTDIR -o $$OUTPUTDIR -s $$CONFFILE $$PELICANOPTS & $$PELICAN --debug --autoreload -r $$INPUTDIR -o $$OUTPUTDIR -s $$CONFFILE $$PELICANOPTS &
echo $$! > $$PELICAN_PID echo $$! > $$PELICAN_PID
cd $$OUTPUTDIR cd $$OUTPUTDIR
python -m SimpleHTTPServer & python -m pelican.server &
echo $$! > $$SRV_PID echo $$! > $$SRV_PID
cd $$BASEDIR cd $$BASEDIR
sleep 1 && echo 'Pelican and SimpleHTTPServer processes now running in background.' sleep 1 && echo 'Pelican and pelican.server processes now running in background.'
} }
### ###

View file

@ -1,10 +1,14 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import six
import os import os
import re import re
import pytz import pytz
import shutil import shutil
import logging import logging
import errno import errno
import locale
from collections import defaultdict, Hashable from collections import defaultdict, Hashable
from functools import partial from functools import partial
@ -17,6 +21,77 @@ from operator import attrgetter
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def strftime(date, date_format):
"""
Replacement for the builtin strftime().
This :func:`strftime()` is compatible to Python 2 and 3. In both cases,
input and output is always unicode.
Still, Python 3's :func:`strftime()` seems to somehow "normalize" unicode
chars in the format string. So if e.g. your format string contains 'ø' or
'ä', the result will be 'o' and 'a'.
See here for an `extensive testcase <https://github.com/dmdm/test_strftime>`_.
:param date: Any object that sports a :meth:`strftime()` method.
:param date_format: Format string, can always be unicode.
:returns: Unicode string with formatted date.
"""
# As tehkonst confirmed, above mentioned testcase runs correctly on
# Python 2 and 3 on Windows as well. Thanks.
if six.PY3:
# It could be so easy... *sigh*
return date.strftime(date_format)
# TODO Perhaps we should refactor again, so that the
# xmlcharrefreplace-regex-dance is always done, regardless
# of the Python version.
else:
# We must ensure that the format string is an encoded byte
# string, ASCII only WTF!!!
# But with "xmlcharrefreplace" our formatted date will produce
# *yuck* like this:
# "Øl trinken beim Besäufnis"
# --> "&#216;l trinken beim Bes&#228;ufnis"
date_format = date_format.encode('ascii',
errors="xmlcharrefreplace")
result = date.strftime(date_format)
# strftime() returns an encoded byte string
# which we must decode into unicode.
lang_code, enc = locale.getlocale(locale.LC_ALL)
if enc:
result = result.decode(enc)
else:
result = unicode(result)
# Convert XML character references back to unicode characters.
if "&#" in result:
result = re.sub(r'&#(?P<num>\d+);'
, lambda m: unichr(int(m.group('num')))
, result
)
return result
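A short usage sketch of the strftime() helper above, assuming Pelican from this commit is on the import path (the date value is arbitrary):

import datetime

from pelican.utils import strftime

# Unicode in, unicode out on both Python 2 and 3; this replaces the old
# date.strftime(date_format.encode('ascii', 'xmlcharrefreplace')) dance
# that pelican/contents.py previously did by hand.
print(strftime(datetime.date(2013, 1, 11), '%d %B %Y'))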
#----------------------------------------------------------------------------
# Stolen from Django: django.utils.encoding
#
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if not six.PY3:
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
#----------------------------------------------------------------------------
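And a usage sketch of the decorator above; the Label class is a hypothetical stand-in for URLWrapper and StaticContent, which this commit decorates the same way:

from __future__ import unicode_literals
from pelican.utils import python_2_unicode_compatible

@python_2_unicode_compatible
class Label(object):
    def __init__(self, name):
        self.name = name

    def __str__(self):
        # Return text here; on Python 2 the decorator moves this method to
        # __unicode__ and installs a UTF-8-encoding __str__ instead.
        return self.name

print(str(Label('café')))  # works on both interpreters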
class NoFilesError(Exception): class NoFilesError(Exception):
pass pass
@ -78,14 +153,24 @@ def slugify(value):
Took from django sources. Took from django sources.
""" """
# TODO Maybe steal again from current Django 1.5dev
value = Markup(value).striptags() value = Markup(value).striptags()
if type(value) == unicode: # value must be unicode per se
import unicodedata import unicodedata
from unidecode import unidecode from unidecode import unidecode
value = unicode(unidecode(value)) # unidecode returns str in Py2 and 3, so in Py2 we have to make
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore') # it unicode again
value = unicode(re.sub('[^\w\s-]', '', value).strip().lower()) value = unidecode(value)
return re.sub('[-\s]+', '-', value) if isinstance(value, six.binary_type):
value = value.decode('ascii')
# still unicode
value = unicodedata.normalize('NFKD', value)
value = re.sub('[^\w\s-]', '', value).strip().lower()
value = re.sub('[-\s]+', '-', value)
# we want only ASCII chars
value = value.encode('ascii', 'ignore')
# but Pelican should generally use only unicode
return value.decode('ascii')
def copy(path, source, destination, destination_path=None, overwrite=False): def copy(path, source, destination, destination_path=None, overwrite=False):
@ -137,7 +222,7 @@ def clean_output_dir(path):
if not os.path.isdir(path): if not os.path.isdir(path):
try: try:
os.remove(path) os.remove(path)
except Exception, e: except Exception as e:
logger.error("Unable to delete file %s; %e" % path, e) logger.error("Unable to delete file %s; %e" % path, e)
return return
@ -148,13 +233,13 @@ def clean_output_dir(path):
try: try:
shutil.rmtree(file) shutil.rmtree(file)
logger.debug("Deleted directory %s" % file) logger.debug("Deleted directory %s" % file)
except Exception, e: except Exception as e:
logger.error("Unable to delete directory %s; %e" % file, e) logger.error("Unable to delete directory %s; %e" % file, e)
elif os.path.isfile(file) or os.path.islink(file): elif os.path.isfile(file) or os.path.islink(file):
try: try:
os.remove(file) os.remove(file)
logger.debug("Deleted file/link %s" % file) logger.debug("Deleted file/link %s" % file)
except Exception, e: except Exception as e:
logger.error("Unable to delete file %s; %e" % file, e) logger.error("Unable to delete file %s; %e" % file, e)
else: else:
logger.error("Unable to delete %s, file type unknown" % file) logger.error("Unable to delete %s, file type unknown" % file)
@ -180,7 +265,7 @@ def truncate_html_words(s, num, end_text='...'):
""" """
length = int(num) length = int(num)
if length <= 0: if length <= 0:
return u'' return ''
html4_singlets = ('br', 'col', 'link', 'base', 'img', 'param', 'area', html4_singlets = ('br', 'col', 'link', 'base', 'img', 'param', 'area',
'hr', 'input') 'hr', 'input')
@ -254,10 +339,10 @@ def process_translations(content_list):
for slug, items in grouped_by_slugs: for slug, items in grouped_by_slugs:
items = list(items) items = list(items)
# find items with default language # find items with default language
default_lang_items = filter(attrgetter('in_default_lang'), items) default_lang_items = list(filter(attrgetter('in_default_lang'), items))
len_ = len(default_lang_items) len_ = len(default_lang_items)
if len_ > 1: if len_ > 1:
logger.warning(u'there are %s variants of "%s"' % (len_, slug)) logger.warning('there are %s variants of "%s"' % (len_, slug))
for x in default_lang_items: for x in default_lang_items:
logger.warning(' %s' % x.filename) logger.warning(' %s' % x.filename)
elif len_ == 0: elif len_ == 0:
@ -269,12 +354,9 @@ def process_translations(content_list):
+ 'content' + 'content'
logger.warning(msg) logger.warning(msg)
index.extend(default_lang_items) index.extend(default_lang_items)
translations.extend(filter( translations.extend([x for x in items if x not in default_lang_items])
lambda x: x not in default_lang_items,
items
))
for a in items: for a in items:
a.translations = filter(lambda x: x != a, items) a.translations = [x for x in items if x != a]
return index, translations return index, translations
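
On Python 3, filter() returns a lazy iterator, so the len() call and the membership tests against default_lang_items would no longer behave as they did on Python 2; wrapping the call in list(), or switching to plain list comprehensions as above, restores the old semantics. A self-contained sketch (the Item class is made up for illustration):

from operator import attrgetter

class Item(object):
    def __init__(self, in_default_lang):
        self.in_default_lang = in_default_lang

items = [Item(True), Item(False), Item(True)]

# list(filter(...)) gives a real, reusable list on both 2.x and 3.x
default_lang_items = list(filter(attrgetter('in_default_lang'), items))
assert len(default_lang_items) == 2

# list comprehensions replace the old filter(lambda ...) calls
translations = [x for x in items if x not in default_lang_items]
assert translations == [items[1]]
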
@ -333,6 +415,6 @@ def set_date_tzinfo(d, tz_name=None):
def mkdir_p(path): def mkdir_p(path):
try: try:
os.makedirs(path) os.makedirs(path)
except OSError, e: except OSError as e:
if e.errno != errno.EEXIST: if e.errno != errno.EEXIST:
raise raise

View file

@ -1,5 +1,6 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import with_statement from __future__ import with_statement, unicode_literals, print_function
import six
import os import os
import locale import locale
@ -57,7 +58,7 @@ class Writer(object):
:param feed_type: the feed type to use (atom or rss) :param feed_type: the feed type to use (atom or rss)
""" """
old_locale = locale.setlocale(locale.LC_ALL) old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, 'C') locale.setlocale(locale.LC_ALL, str('C'))
try: try:
self.site_url = context.get('SITEURL', get_relative_path(filename)) self.site_url = context.get('SITEURL', get_relative_path(filename))
self.feed_domain = context.get('FEED_DOMAIN') self.feed_domain = context.get('FEED_DOMAIN')
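
The str('C') wrapper looks redundant but is deliberate: with unicode_literals in effect, 'C' is a unicode literal, and Python 2's locale.setlocale mishandles unicode locale names (only plain str is special-cased), while str() is a no-op on Python 3. A sketch of the save/switch/restore pattern used around feed and template rendering:

from __future__ import unicode_literals
import locale

old_locale = locale.setlocale(locale.LC_ALL)       # remember the current locale
locale.setlocale(locale.LC_ALL, str('C'))          # native str on 2.x and 3.x
try:
    stamp = 'render feeds/templates here'          # placeholder for the real work
finally:
    locale.setlocale(locale.LC_ALL, old_locale)    # always restore
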
@ -68,7 +69,7 @@ class Writer(object):
max_items = len(elements) max_items = len(elements)
if self.settings['FEED_MAX_ITEMS']: if self.settings['FEED_MAX_ITEMS']:
max_items = min(self.settings['FEED_MAX_ITEMS'], max_items) max_items = min(self.settings['FEED_MAX_ITEMS'], max_items)
for i in xrange(max_items): for i in range(max_items):
self._add_item_to_the_feed(feed, elements[i]) self._add_item_to_the_feed(feed, elements[i])
if filename: if filename:
@ -77,7 +78,7 @@ class Writer(object):
os.makedirs(os.path.dirname(complete_path)) os.makedirs(os.path.dirname(complete_path))
except Exception: except Exception:
pass pass
fp = open(complete_path, 'w') fp = open(complete_path, 'w', encoding='utf-8' if six.PY3 else None)
feed.write(fp, 'utf-8') feed.write(fp, 'utf-8')
logger.info('writing %s' % complete_path) logger.info('writing %s' % complete_path)
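
Two more gaps are bridged here: xrange() is gone on Python 3 (the plain range() builtin exists on both, and the loop is short anyway), and the feed file gets an explicit UTF-8 encoding only on Python 3. The conditional assumes the module's open() also takes an encoding keyword on Python 2 (the builtin there does not, so a codecs/io based replacement is implied); spelled out with the plain builtins, the idea is:

import six

def open_feed_file(path):
    # On Python 3 the feed is written as text, so the file needs an explicit
    # encoding; on Python 2 feedgenerator hands over UTF-8 encoded byte
    # strings, so the file is opened without one.
    if six.PY3:
        return open(path, 'w', encoding='utf-8')
    return open(path, 'w')
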
@ -108,7 +109,7 @@ class Writer(object):
def _write_file(template, localcontext, output_path, name): def _write_file(template, localcontext, output_path, name):
"""Render the template write the file.""" """Render the template write the file."""
old_locale = locale.setlocale(locale.LC_ALL) old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, 'C') locale.setlocale(locale.LC_ALL, str('C'))
try: try:
output = template.render(localcontext) output = template.render(localcontext)
finally: finally:
@ -120,7 +121,7 @@ class Writer(object):
pass pass
with open(filename, 'w', encoding='utf-8') as f: with open(filename, 'w', encoding='utf-8') as f:
f.write(output) f.write(output)
logger.info(u'writing %s' % filename) logger.info('writing %s' % filename)
localcontext = context.copy() localcontext = context.copy()
if relative_urls: if relative_urls:
@ -135,7 +136,7 @@ class Writer(object):
if paginated: if paginated:
# pagination needed, init paginators # pagination needed, init paginators
paginators = {} paginators = {}
for key in paginated.iterkeys(): for key in paginated.keys():
object_list = paginated[key] object_list = paginated[key]
if self.settings.get('DEFAULT_PAGINATION'): if self.settings.get('DEFAULT_PAGINATION'):
@ -147,9 +148,9 @@ class Writer(object):
# generated pages, and write # generated pages, and write
name_root, ext = os.path.splitext(name) name_root, ext = os.path.splitext(name)
for page_num in range(paginators.values()[0].num_pages): for page_num in range(list(paginators.values())[0].num_pages):
paginated_localcontext = localcontext.copy() paginated_localcontext = localcontext.copy()
for key in paginators.iterkeys(): for key in paginators.keys():
paginator = paginators[key] paginator = paginators[key]
page = paginator.page(page_num + 1) page = paginator.page(page_num + 1)
paginated_localcontext.update( paginated_localcontext.update(
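
The pagination loop runs into two more dict API differences: iterkeys()/iteritems() no longer exist on Python 3, and keys()/values() return view objects there, which cannot be indexed, hence the extra list() around paginators.values(). In short:

paginators = {'articles': object(), 'dates': object()}

for key in paginators.keys():             # works on 2.x and 3.x; no iterkeys()
    paginator = paginators[key]

first = list(paginators.values())[0]      # a view is not indexable on 3.x
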

View file

@ -1,6 +1,8 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
AUTHOR = u'Alexis Métaireau' from __future__ import unicode_literals
SITENAME = u"Alexis' log"
AUTHOR = 'Alexis Métaireau'
SITENAME = "Alexis' log"
SITEURL = 'http://blog.notmyidea.org' SITEURL = 'http://blog.notmyidea.org'
TIMEZONE = "Europe/Paris" TIMEZONE = "Europe/Paris"
@ -10,7 +12,7 @@ PDF_GENERATOR = False
REVERSE_CATEGORY_ORDER = True REVERSE_CATEGORY_ORDER = True
LOCALE = "C" LOCALE = "C"
DEFAULT_PAGINATION = 4 DEFAULT_PAGINATION = 4
DEFAULT_DATE = (2012, 03, 02, 14, 01, 01) DEFAULT_DATE = (2012, 3, 2, 14, 1, 1)
FEED_ALL_RSS = 'feeds/all.rss.xml' FEED_ALL_RSS = 'feeds/all.rss.xml'
CATEGORY_FEED_RSS = 'feeds/%s.rss.xml' CATEGORY_FEED_RSS = 'feeds/%s.rss.xml'
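
The date tuple had to change as well: integers written with a leading zero such as 03 are old-style octal literals, which Python 3 rejects outright, so plain decimals are used. For example:

DEFAULT_DATE = (2012, 3, 2, 14, 1, 1)    # was (2012, 03, 02, 14, 01, 01)
# Leading-zero literals like 03 are a SyntaxError on Python 3; real octal
# values are written 0o3 there.
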
@ -19,7 +21,7 @@ LINKS = (('Biologeek', 'http://biologeek.org'),
('Filyb', "http://filyb.info/"), ('Filyb', "http://filyb.info/"),
('Libert-fr', "http://www.libert-fr.com"), ('Libert-fr', "http://www.libert-fr.com"),
('N1k0', "http://prendreuncafe.com/blog/"), ('N1k0', "http://prendreuncafe.com/blog/"),
(u'Tarek Ziadé', "http://ziade.org/blog"), ('Tarek Ziadé', "http://ziade.org/blog"),
('Zubin Mithra', "http://zubin71.wordpress.com/"),) ('Zubin Mithra', "http://zubin71.wordpress.com/"),)
SOCIAL = (('twitter', 'http://twitter.com/ametaireau'), SOCIAL = (('twitter', 'http://twitter.com/ametaireau'),

View file

@ -1,8 +1,8 @@
#!/usr/bin/env python #!/usr/bin/env python
from setuptools import setup from setuptools import setup
requires = ['feedgenerator', 'jinja2 >= 2.6', 'pygments', 'docutils', 'pytz', requires = ['feedgenerator>=1.5', 'jinja2 >= 2.6', 'pygments', 'docutils', 'pytz',
'blinker', 'unidecode'] 'blinker', 'unidecode', 'six']
try: try:
import argparse # NOQA import argparse # NOQA

View file

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
AUTHOR = u'Alexis Métaireau' from __future__ import unicode_literals, print_function
SITENAME = u"Alexis' log" AUTHOR = 'Alexis Métaireau'
SITENAME = "Alexis' log"
SITEURL = 'http://blog.notmyidea.org' SITEURL = 'http://blog.notmyidea.org'
TIMEZONE = 'UTC' TIMEZONE = 'UTC'
@ -18,7 +19,7 @@ LINKS = (('Biologeek', 'http://biologeek.org'),
('Filyb', "http://filyb.info/"), ('Filyb', "http://filyb.info/"),
('Libert-fr', "http://www.libert-fr.com"), ('Libert-fr', "http://www.libert-fr.com"),
('N1k0', "http://prendreuncafe.com/blog/"), ('N1k0', "http://prendreuncafe.com/blog/"),
(u'Tarek Ziadé', "http://ziade.org/blog"), ('Tarek Ziadé', "http://ziade.org/blog"),
('Zubin Mithra', "http://zubin71.wordpress.com/"),) ('Zubin Mithra', "http://zubin71.wordpress.com/"),)
SOCIAL = (('twitter', 'http://twitter.com/ametaireau'), SOCIAL = (('twitter', 'http://twitter.com/ametaireau'),

View file

@ -1,13 +1,15 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
__all__ = [ __all__ = [
'get_article', 'get_article',
'unittest', 'unittest',
] ]
import cStringIO
import os import os
import re import re
import subprocess import subprocess
import sys import sys
from six import StringIO
import logging import logging
from logging.handlers import BufferingHandler from logging.handlers import BufferingHandler
@ -101,7 +103,7 @@ def mute(returns_output=False):
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
saved_stdout = sys.stdout saved_stdout = sys.stdout
sys.stdout = cStringIO.StringIO() sys.stdout = StringIO()
try: try:
out = func(*args, **kwargs) out = func(*args, **kwargs)
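
cStringIO does not exist on Python 3; six.StringIO resolves to io.StringIO there and to StringIO.StringIO on Python 2, so the stdout-capturing decorator keeps working unchanged. A reduced sketch of the same capture pattern:

from __future__ import print_function
import sys
from six import StringIO

def capture_stdout(func, *args, **kwargs):
    saved_stdout = sys.stdout
    sys.stdout = StringIO()           # in-memory text buffer on 2.x and 3.x
    try:
        func(*args, **kwargs)
        return sys.stdout.getvalue()
    finally:
        sys.stdout = saved_stdout     # always restore the real stdout
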

View file

@ -113,8 +113,8 @@ class TestPage(unittest.TestCase):
page = Page(**page_kwargs) page = Page(**page_kwargs)
self.assertEqual(page.locale_date, self.assertEqual(page.locale_date,
unicode(dt.strftime(_DEFAULT_CONFIG['DEFAULT_DATE_FORMAT']), dt.strftime(_DEFAULT_CONFIG['DEFAULT_DATE_FORMAT']))
'utf-8'))
page_kwargs['settings'] = dict([(x, _DEFAULT_CONFIG[x]) for x in page_kwargs['settings'] = dict([(x, _DEFAULT_CONFIG[x]) for x in
_DEFAULT_CONFIG]) _DEFAULT_CONFIG])
@ -131,7 +131,7 @@ class TestPage(unittest.TestCase):
import locale as locale_module import locale as locale_module
try: try:
page = Page(**page_kwargs) page = Page(**page_kwargs)
self.assertEqual(page.locale_date, u'2015-09-13(\u65e5)') self.assertEqual(page.locale_date, '2015-09-13(\u65e5)')
except locale_module.Error: except locale_module.Error:
# The constructor of ``Page`` will try to set the locale to # The constructor of ``Page`` will try to set the locale to
# ``ja_JP.utf8``. But this attempt will fail when there is no # ``ja_JP.utf8``. But this attempt will fail when there is no

View file

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from mock import MagicMock from mock import MagicMock
import os import os
@ -31,7 +32,7 @@ class TestArticlesGenerator(unittest.TestCase):
settings = get_settings() settings = get_settings()
settings['ARTICLE_DIR'] = 'content' settings['ARTICLE_DIR'] = 'content'
settings['DEFAULT_CATEGORY'] = 'Default' settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 01, 01) settings['DEFAULT_DATE'] = (1970, 1, 1)
self.generator = ArticlesGenerator(settings.copy(), settings, self.generator = ArticlesGenerator(settings.copy(), settings,
CUR_DIR, settings['THEME'], None, CUR_DIR, settings['THEME'], None,
settings['MARKUP']) settings['MARKUP'])
@ -70,18 +71,18 @@ class TestArticlesGenerator(unittest.TestCase):
generator = self.get_populated_generator() generator = self.get_populated_generator()
articles = self.distill_articles(generator.articles) articles = self.distill_articles(generator.articles)
articles_expected = [ articles_expected = [
[u'Article title', 'published', 'Default', 'article'], ['Article title', 'published', 'Default', 'article'],
[u'Article with markdown and summary metadata single', 'published', u'Default', 'article'], ['Article with markdown and summary metadata single', 'published', 'Default', 'article'],
[u'Article with markdown and summary metadata multi', 'published', u'Default', 'article'], ['Article with markdown and summary metadata multi', 'published', 'Default', 'article'],
[u'Article with template', 'published', 'Default', 'custom'], ['Article with template', 'published', 'Default', 'custom'],
[u'Test md File', 'published', 'test', 'article'], ['Test md File', 'published', 'test', 'article'],
[u'Rst with filename metadata', 'published', u'yeah', 'article'], ['Rst with filename metadata', 'published', 'yeah', 'article'],
[u'Test Markdown extensions', 'published', u'Default', 'article'], ['Test Markdown extensions', 'published', 'Default', 'article'],
[u'This is a super article !', 'published', 'Yeah', 'article'], ['This is a super article !', 'published', 'Yeah', 'article'],
[u'This is an article with category !', 'published', 'yeah', 'article'], ['This is an article with category !', 'published', 'yeah', 'article'],
[u'This is an article without category !', 'published', 'Default', 'article'], ['This is an article without category !', 'published', 'Default', 'article'],
[u'This is an article without category !', 'published', 'TestCategory', 'article'], ['This is an article without category !', 'published', 'TestCategory', 'article'],
[u'This is a super article !', 'published', 'yeah', 'article'] ['This is a super article !', 'published', 'yeah', 'article']
] ]
self.assertItemsEqual(articles_expected, articles) self.assertItemsEqual(articles_expected, articles)
@ -97,7 +98,7 @@ class TestArticlesGenerator(unittest.TestCase):
settings = _DEFAULT_CONFIG.copy() settings = _DEFAULT_CONFIG.copy()
settings['ARTICLE_DIR'] = 'content' settings['ARTICLE_DIR'] = 'content'
settings['DEFAULT_CATEGORY'] = 'Default' settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 01, 01) settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['USE_FOLDER_AS_CATEGORY'] = False settings['USE_FOLDER_AS_CATEGORY'] = False
settings['filenames'] = {} settings['filenames'] = {}
generator = ArticlesGenerator(settings.copy(), settings, generator = ArticlesGenerator(settings.copy(), settings,
@ -179,7 +180,7 @@ class TestPageGenerator(unittest.TestCase):
def test_generate_context(self): def test_generate_context(self):
settings = get_settings() settings = get_settings()
settings['PAGE_DIR'] = 'TestPages' settings['PAGE_DIR'] = 'TestPages'
settings['DEFAULT_DATE'] = (1970, 01, 01) settings['DEFAULT_DATE'] = (1970, 1, 1)
generator = PagesGenerator(settings.copy(), settings, CUR_DIR, generator = PagesGenerator(settings.copy(), settings, CUR_DIR,
settings['THEME'], None, settings['THEME'], None,
@ -189,14 +190,14 @@ class TestPageGenerator(unittest.TestCase):
hidden_pages = self.distill_pages(generator.hidden_pages) hidden_pages = self.distill_pages(generator.hidden_pages)
pages_expected = [ pages_expected = [
[u'This is a test page', 'published', 'page'], ['This is a test page', 'published', 'page'],
[u'This is a markdown test page', 'published', 'page'], ['This is a markdown test page', 'published', 'page'],
[u'This is a test page with a preset template', 'published', 'custom'] ['This is a test page with a preset template', 'published', 'custom']
] ]
hidden_pages_expected = [ hidden_pages_expected = [
[u'This is a test hidden page', 'hidden', 'page'], ['This is a test hidden page', 'hidden', 'page'],
[u'This is a markdown test hidden page', 'hidden', 'page'], ['This is a markdown test hidden page', 'hidden', 'page'],
[u'This is a test hidden page with a custom template', 'hidden', 'custom'] ['This is a test hidden page with a custom template', 'hidden', 'custom']
] ]
self.assertItemsEqual(pages_expected,pages) self.assertItemsEqual(pages_expected,pages)

View file

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os import os
@ -9,7 +10,7 @@ CUR_DIR = os.path.dirname(__file__)
WORDPRESS_XML_SAMPLE = os.path.join(CUR_DIR, 'content', 'wordpressexport.xml') WORDPRESS_XML_SAMPLE = os.path.join(CUR_DIR, 'content', 'wordpressexport.xml')
try: try:
import BeautifulSoup from bs4 import BeautifulSoup
except ImportError: except ImportError:
BeautifulSoup = False # NOQA BeautifulSoup = False # NOQA
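
The importer tests move from BeautifulSoup 3 (Python 2 only) to BeautifulSoup 4, which supports both major versions and is imported as bs4, matching the BeautifulSoup4 entries added to the requirements and tox dependencies. A small usage sketch of the fallback import:

from __future__ import print_function

try:
    from bs4 import BeautifulSoup     # BeautifulSoup4 works on 2.x and 3.x
except ImportError:
    BeautifulSoup = False  # NOQA     # lets dependent tests be skipped

if BeautifulSoup:
    soup = BeautifulSoup('<p>Hello &amp; goodbye</p>')
    print(soup.p.get_text())          # Hello & goodbye
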
@ -48,26 +49,6 @@ class TestWordpressXmlImporter(unittest.TestCase):
strip_raw=True)) strip_raw=True))
self.assertFalse(any('<iframe' in rst for rst in rst_files)) self.assertFalse(any('<iframe' in rst for rst in rst_files))
def test_can_toggle_slug_storage(self):
posts = list(self.posts)
r = lambda f: open(f).read()
silent_f2p = mute(True)(fields2pelican)
with temporary_folder() as temp:
rst_files = (r(f) for f in silent_f2p(posts, 'markdown', temp))
self.assertTrue(all('Slug:' in rst for rst in rst_files))
rst_files = (r(f) for f in silent_f2p(posts, 'markdown', temp,
disable_slugs=True))
self.assertFalse(any('Slug:' in rst for rst in rst_files))
rst_files = (r(f) for f in silent_f2p(posts, 'rst', temp))
self.assertTrue(all(':slug:' in rst for rst in rst_files))
rst_files = (r(f) for f in silent_f2p(posts, 'rst', temp,
disable_slugs=True))
self.assertFalse(any(':slug:' in rst for rst in rst_files))
def test_decode_html_entities_in_titles(self): def test_decode_html_entities_in_titles(self):
posts = list(self.posts) posts = list(self.posts)
test_posts = [post for post in posts if post[2] == 'html-entity-test'] test_posts = [post for post in posts if post[2] == 'html-entity-test']

View file

@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
try: try:
import unittest2 as unittest import unittest2 as unittest
except ImportError: except ImportError:
@ -34,7 +36,7 @@ def recursiveDiff(dcmp):
for f in dcmp.right_only], for f in dcmp.right_only],
} }
for sub_dcmp in dcmp.subdirs.values(): for sub_dcmp in dcmp.subdirs.values():
for k, v in recursiveDiff(sub_dcmp).iteritems(): for k, v in recursiveDiff(sub_dcmp).items():
diff[k] += v diff[k] += v
return diff return diff
@ -48,7 +50,7 @@ class TestPelican(unittest.TestCase):
logging.getLogger().addHandler(self.logcount_handler) logging.getLogger().addHandler(self.logcount_handler)
self.temp_path = mkdtemp() self.temp_path = mkdtemp()
self.old_locale = locale.setlocale(locale.LC_ALL) self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, 'C') locale.setlocale(locale.LC_ALL, str('C'))
def tearDown(self): def tearDown(self):
rmtree(self.temp_path) rmtree(self.temp_path)

View file

@ -6,7 +6,7 @@ import tempfile
from pelican.plugins import gzip_cache from pelican.plugins import gzip_cache
from support import unittest, temporary_folder from .support import unittest, temporary_folder
class TestGzipCache(unittest.TestCase): class TestGzipCache(unittest.TestCase):
'''Unit tests for the gzip cache plugin''' '''Unit tests for the gzip cache plugin'''
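
The test helper is now pulled in with an explicit relative import: Python 3 dropped implicit relative imports, so a bare "from support import ..." would fail there, while the dotted form works inside a package on Python 2.6+ and 3.x alike. A sketch, assuming support.py sits next to the test module in the same package:

# inside the tests package, next to support.py
from .support import unittest, temporary_folder    # explicit relative import
# The old "from support import unittest" relied on implicit relative
# imports, which Python 3 removed.
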

View file

@ -1,4 +1,5 @@
# coding: utf-8 # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import datetime import datetime
import os import os
@ -21,11 +22,11 @@ class RstReaderTest(unittest.TestCase):
content, metadata = reader.read(_filename('article_with_metadata.rst')) content, metadata = reader.read(_filename('article_with_metadata.rst'))
expected = { expected = {
'category': 'yeah', 'category': 'yeah',
'author': u'Alexis Métaireau', 'author': 'Alexis Métaireau',
'title': 'This is a super article !', 'title': 'This is a super article !',
'summary': u'<p class="first last">Multi-line metadata should be'\ 'summary': '<p class="first last">Multi-line metadata should be'\
u' supported\nas well as <strong>inline'\ ' supported\nas well as <strong>inline'\
u' markup</strong>.</p>\n', ' markup</strong>.</p>\n',
'date': datetime.datetime(2010, 12, 2, 10, 14), 'date': datetime.datetime(2010, 12, 2, 10, 14),
'tags': ['foo', 'bar', 'foobar'], 'tags': ['foo', 'bar', 'foobar'],
'custom_field': 'http://notmyidea.org', 'custom_field': 'http://notmyidea.org',
@ -40,7 +41,7 @@ class RstReaderTest(unittest.TestCase):
settings={}) settings={})
expected = { expected = {
'category': 'yeah', 'category': 'yeah',
'author': u'Alexis Métaireau', 'author': 'Alexis Métaireau',
'title': 'Rst with filename metadata', 'title': 'Rst with filename metadata',
} }
for key, value in metadata.items(): for key, value in metadata.items():
@ -53,7 +54,7 @@ class RstReaderTest(unittest.TestCase):
}) })
expected = { expected = {
'category': 'yeah', 'category': 'yeah',
'author': u'Alexis Métaireau', 'author': 'Alexis Métaireau',
'title': 'Rst with filename metadata', 'title': 'Rst with filename metadata',
'date': datetime.datetime(2012, 11, 29), 'date': datetime.datetime(2012, 11, 29),
} }
@ -69,7 +70,7 @@ class RstReaderTest(unittest.TestCase):
}) })
expected = { expected = {
'category': 'yeah', 'category': 'yeah',
'author': u'Alexis Métaireau', 'author': 'Alexis Métaireau',
'title': 'Rst with filename metadata', 'title': 'Rst with filename metadata',
'date': datetime.datetime(2012, 11, 29), 'date': datetime.datetime(2012, 11, 29),
'slug': 'article_with_filename_metadata', 'slug': 'article_with_filename_metadata',
@ -101,7 +102,7 @@ class RstReaderTest(unittest.TestCase):
# otherwise, typogrify should be applied # otherwise, typogrify should be applied
content, _ = readers.read_file(_filename('article.rst'), content, _ = readers.read_file(_filename('article.rst'),
settings={'TYPOGRIFY': True}) settings={'TYPOGRIFY': True})
expected = u"<p>This is some content. With some stuff to&nbsp;"\ expected = "<p>This is some content. With some stuff to&nbsp;"\
"&#8220;typogrify&#8221;.</p>\n<p>Now with added "\ "&#8220;typogrify&#8221;.</p>\n<p>Now with added "\
'support for <abbr title="three letter acronym">'\ 'support for <abbr title="three letter acronym">'\
'<span class="caps">TLA</span></abbr>.</p>\n' '<span class="caps">TLA</span></abbr>.</p>\n'
@ -168,7 +169,7 @@ class MdReaderTest(unittest.TestCase):
settings={}) settings={})
expected = { expected = {
'category': 'yeah', 'category': 'yeah',
'author': u'Alexis Métaireau', 'author': 'Alexis Métaireau',
} }
for key, value in expected.items(): for key, value in expected.items():
self.assertEquals(value, metadata[key], key) self.assertEquals(value, metadata[key], key)
@ -180,7 +181,7 @@ class MdReaderTest(unittest.TestCase):
}) })
expected = { expected = {
'category': 'yeah', 'category': 'yeah',
'author': u'Alexis Métaireau', 'author': 'Alexis Métaireau',
'date': datetime.datetime(2012, 11, 30), 'date': datetime.datetime(2012, 11, 30),
} }
for key, value in expected.items(): for key, value in expected.items():
@ -195,7 +196,7 @@ class MdReaderTest(unittest.TestCase):
}) })
expected = { expected = {
'category': 'yeah', 'category': 'yeah',
'author': u'Alexis Métaireau', 'author': 'Alexis Métaireau',
'date': datetime.datetime(2012, 11, 30), 'date': datetime.datetime(2012, 11, 30),
'slug': 'md_w_filename_meta', 'slug': 'md_w_filename_meta',
'mymeta': 'foo', 'mymeta': 'foo',
@ -203,20 +204,6 @@ class MdReaderTest(unittest.TestCase):
for key, value in expected.items(): for key, value in expected.items():
self.assertEquals(value, metadata[key], key) self.assertEquals(value, metadata[key], key)
@unittest.skipUnless(readers.Markdown, "markdown isn't installed")
def test_article_with_summary_metadata(self):
reader = readers.MarkdownReader({})
content, metadata = reader.read(
_filename('article_with_markdown_and_summary_metadata_single.md'))
expected_summary = u'<p>A single-line summary should be supported'\
u' as well as <strong>inline markup</strong>.</p>'
self.assertEquals(expected_summary, metadata['summary'], 'summary')
content, metadata = reader.read(
_filename('article_with_markdown_and_summary_metadata_multi.md'))
expected_summary = u'<p>A multi-line summary should be supported'\
u'\nas well as <strong>inline markup</strong>.</p>'
self.assertEquals(expected_summary, metadata['summary'], 'summary')
class AdReaderTest(unittest.TestCase): class AdReaderTest(unittest.TestCase):
@unittest.skipUnless(readers.asciidoc, "asciidoc isn't installed") @unittest.skipUnless(readers.asciidoc, "asciidoc isn't installed")

View file

@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import copy import copy
from os.path import dirname, abspath, join from os.path import dirname, abspath, join
@ -16,7 +18,7 @@ class TestSettingsConfiguration(unittest.TestCase):
self.settings = read_settings(default_conf) self.settings = read_settings(default_conf)
def test_overwrite_existing_settings(self): def test_overwrite_existing_settings(self):
self.assertEqual(self.settings.get('SITENAME'), u"Alexis' log") self.assertEqual(self.settings.get('SITENAME'), "Alexis' log")
self.assertEqual(self.settings.get('SITEURL'), self.assertEqual(self.settings.get('SITEURL'),
'http://blog.notmyidea.org') 'http://blog.notmyidea.org')

View file

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import shutil import shutil
import os import os
import datetime import datetime
@ -41,10 +42,10 @@ class TestUtils(unittest.TestCase):
samples = (('this is a test', 'this-is-a-test'), samples = (('this is a test', 'this-is-a-test'),
('this is a test', 'this-is-a-test'), ('this is a test', 'this-is-a-test'),
(u'this → is ← a ↑ test', 'this-is-a-test'), ('this → is ← a ↑ test', 'this-is-a-test'),
('this--is---a test', 'this-is-a-test'), ('this--is---a test', 'this-is-a-test'),
(u'unicode測試許功蓋你看到了嗎', 'unicodece-shi-xu-gong-gai-ni-kan-dao-liao-ma'), ('unicode測試許功蓋你看到了嗎', 'unicodece-shi-xu-gong-gai-ni-kan-dao-liao-ma'),
(u'大飯原発4号機、18日夜起動へ', 'da-fan-yuan-fa-4hao-ji-18ri-ye-qi-dong-he'),) ('大飯原発4号機、18日夜起動へ', 'da-fan-yuan-fa-4hao-ji-18ri-ye-qi-dong-he'),)
for value, expected in samples: for value, expected in samples:
self.assertEquals(utils.slugify(value), expected) self.assertEquals(utils.slugify(value), expected)

View file

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
# from __future__ import unicode_literals
import hashlib import hashlib
import os import os

65
tox.ini
View file

@ -1,15 +1,74 @@
# This tests the unified codebase (py26-py32) of Pelican. It
# depends on some external libraries that aren't released yet.
#
# To run Pelican, you will already have checked them out and installed them.
#
# Now we must tell tox about these packages, otherwise tox would load the old
# libraries from PyPI.
#
# Run tox from each library's source tree. It will save its package in
# the distshare directory, from where the tests here will pick it up.
#
# Do that for smartypants:
# https://github.com/dmdm/smartypants.git
#
# and typogrify:
# https://github.com/dmdm/typogrify/tree/py3k
#
# and webassets:
# https://github.com/dmdm/webassets/tree/py3k
#
#
# CAVEAT:
# -------
#
# 1/
# Please be aware that my ports of typogrify and webassets are just 2to3'd.
# They are not backwards compatible with Python 2.
#
# 2/
# Webassets still has unresolved issues, so I deactivated it for Py32 tests.
[tox] [tox]
envlist = py26,py27 envlist = py26,py27,py32
[testenv] [testenv]
commands = commands =
nosetests -s tests
unit2 discover [] unit2 discover []
nosetests -s tests
deps =
[testenv:py26]
deps = deps =
nose nose
unittest2 unittest2
mock mock
Markdown Markdown
BeautifulSoup BeautifulSoup4
feedgenerator
typogrify typogrify
webassets webassets
[testenv:py27]
deps =
nose
unittest2
mock
Markdown
BeautifulSoup4
feedgenerator
typogrify
webassets
[testenv:py32]
deps =
nose
unittest2py3k
mock
Markdown
BeautifulSoup4
feedgenerator
# {distshare}/smartypants-1.6.0.3.zip
# {distshare}/typogrify-2.0.0.zip
# {distshare}/webassets-0.8.dev.zip