forked from github/pelican

Port Pelican to Python 3.

Stays compatible with the 2.x series, thanks to a unified codebase.
Dirk Makowski 2013-01-11 02:57:43 +01:00 committed by Alexis Métaireau
commit 71995d5e1b
43 changed files with 495 additions and 287 deletions
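The unified codebase mentioned above leans on the six compatibility layer plus Python-3-safe syntax, and the same handful of idioms recur throughout the diff below. A minimal sketch of those idioms, assuming only that six is installed (the function names here are illustrative, not Pelican's):

    from __future__ import unicode_literals, print_function

    import os
    from codecs import open   # behaves the same on Python 2 and 3

    import six

    if six.PY3:
        str_compat = str
    else:
        str_compat = unicode  # only defined on Python 2; never reached on 3


    def load_plugin(plugin):
        # Check against six.string_types instead of the Python-2-only basestring.
        if isinstance(plugin, six.string_types):
            plugin = __import__(plugin)
        return plugin


    def read_text(path):
        # Open with an explicit encoding rather than decoding bytes afterwards.
        with open(path, 'r', encoding='utf-8') as f:
            return f.read()


    def remove_quietly(path):
        try:
            os.remove(path)
        except OSError as e:  # "except OSError, e:" is Python-2-only syntax
            print('Could not remove {0}: {1}'.format(path, e))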

2
.gitignore vendored
View file

@ -11,3 +11,5 @@ tags
.tox
.coverage
htmlcov
six-*.egg/
*.orig

View file

@ -2,11 +2,13 @@ language: python
python:
- "2.6"
- "2.7"
# - "3.2"
before_install:
- sudo apt-get update -qq
- sudo apt-get install -qq ruby-sass
install:
- pip install nose unittest2 mock --use-mirrors
- pip install nose mock --use-mirrors
- if [[ $TRAVIS_PYTHON_VERSION == '3.2' ]]; then pip install --use-mirrors unittest2py3k; else pip install --use-mirrors unittest2; fi
- pip install . --use-mirrors
- pip install Markdown
- pip install webassets

View file

@ -1,8 +1,9 @@
# Tests
unittest2
mock
# Optional Packages
Markdown
BeautifulSoup
BeautifulSoup4
lxml
typogrify
webassets
webassets

View file

@ -4,7 +4,7 @@ Release history
3.2 (XXXX-XX-XX)
================
* [...]
* Support for Python 3!
3.1 (2012-12-04)
================

View file

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys, os
sys.path.append(os.path.abspath('..'))
@ -10,8 +11,8 @@ templates_path = ['_templates']
extensions = ['sphinx.ext.autodoc',]
source_suffix = '.rst'
master_doc = 'index'
project = u'Pelican'
copyright = u'2010, Alexis Metaireau and contributors'
project = 'Pelican'
copyright = '2010, Alexis Metaireau and contributors'
exclude_patterns = ['_build']
version = __version__
release = __major__
@ -34,16 +35,16 @@ htmlhelp_basename = 'Pelicandoc'
# -- Options for LaTeX output --------------------------------------------------
latex_documents = [
('index', 'Pelican.tex', u'Pelican Documentation',
u'Alexis Métaireau', 'manual'),
('index', 'Pelican.tex', 'Pelican Documentation',
'Alexis Métaireau', 'manual'),
]
# -- Options for manual page output --------------------------------------------
man_pages = [
('index', 'pelican', u'pelican documentation',
[u'Alexis Métaireau'], 1),
('pelican-themes', 'pelican-themes', u'A theme manager for Pelican',
[u'Mickaël Raybaud'], 1),
('themes', 'pelican-theming', u'How to create themes for Pelican',
[u'The Pelican contributors'], 1)
('index', 'pelican', 'pelican documentation',
['Alexis Métaireau'], 1),
('pelican-themes', 'pelican-themes', 'A theme manager for Pelican',
['Mickaël Raybaud'], 1),
('themes', 'pelican-theming', 'How to create themes for Pelican',
['The Pelican contributors'], 1)
]

View file

@ -1,3 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import six
import os
import re
import sys
@ -55,7 +59,7 @@ class Pelican(object):
self.plugins = self.settings['PLUGINS']
for plugin in self.plugins:
# if it's a string, then import it
if isinstance(plugin, basestring):
if isinstance(plugin, six.string_types):
logger.debug("Loading plugin `{0}' ...".format(plugin))
plugin = __import__(plugin, globals(), locals(), 'module')
@ -265,7 +269,7 @@ def get_instance(args):
settings = read_settings(args.settings, override=get_config(args))
cls = settings.get('PELICAN_CLASS')
if isinstance(cls, basestring):
if isinstance(cls, six.string_types):
module, cls_name = cls.rsplit('.', 1)
module = __import__(module)
cls = getattr(module, cls_name)
@ -311,15 +315,15 @@ def main():
"Nothing to generate.")
files_found_error = False
time.sleep(1) # sleep to avoid cpu load
except Exception, e:
except Exception as e:
logger.warning(
"Caught exception \"{}\". Reloading.".format(e)
)
continue
else:
pelican.run()
except Exception, e:
logger.critical(unicode(e))
except Exception as e:
logger.critical(e)
if (args.verbosity == logging.DEBUG):
raise

View file

@ -1,4 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import six
import copy
import locale
import logging
@ -11,8 +14,10 @@ from sys import platform, stdin
from pelican.settings import _DEFAULT_CONFIG
from pelican.utils import slugify, truncate_html_words, memoized
from pelican.utils import (slugify, truncate_html_words, memoized,
python_2_unicode_compatible)
from pelican import signals
import pelican.utils
logger = logging.getLogger(__name__)
@ -85,13 +90,8 @@ class Page(object):
self.date_format = self.date_format[1]
if hasattr(self, 'date'):
encoded_date = self.date.strftime(
self.date_format.encode('ascii', 'xmlcharrefreplace'))
if platform == 'win32':
self.locale_date = encoded_date.decode(stdin.encoding)
else:
self.locale_date = encoded_date.decode('utf')
self.locale_date = pelican.utils.strftime(self.date,
self.date_format)
# manage status
if not hasattr(self, 'status'):
@ -167,7 +167,7 @@ class Page(object):
origin = '/'.join((siteurl,
self._context['filenames'][value].url))
else:
logger.warning(u"Unable to find {fn}, skipping url"
logger.warning("Unable to find {fn}, skipping url"
" replacement".format(fn=value))
return m.group('markup') + m.group('quote') + origin \
@ -243,10 +243,10 @@ class Article(Page):
class Quote(Page):
base_properties = ('author', 'date')
@python_2_unicode_compatible
class URLWrapper(object):
def __init__(self, name, settings):
self.name = unicode(name)
self.name = name
self.slug = slugify(self.name)
self.settings = settings
@ -257,12 +257,9 @@ class URLWrapper(object):
return hash(self.name)
def __eq__(self, other):
return self.name == unicode(other)
return self.name == other
def __str__(self):
return str(self.name.encode('utf-8', 'replace'))
def __unicode__(self):
return self.name
def _from_settings(self, key, get_page_name=False):
@ -272,14 +269,14 @@ class URLWrapper(object):
Useful for pagination."""
setting = "%s_%s" % (self.__class__.__name__.upper(), key)
value = self.settings[setting]
if not isinstance(value, basestring):
logger.warning(u'%s is set to %s' % (setting, value))
if not isinstance(value, six.string_types):
logger.warning('%s is set to %s' % (setting, value))
return value
else:
if get_page_name:
return unicode(os.path.splitext(value)[0]).format(**self.as_dict())
return os.path.splitext(value)[0].format(**self.as_dict())
else:
return unicode(value).format(**self.as_dict())
return value.format(**self.as_dict())
page_name = property(functools.partial(_from_settings, key='URL', get_page_name=True))
url = property(functools.partial(_from_settings, key='URL'))
@ -292,13 +289,14 @@ class Category(URLWrapper):
class Tag(URLWrapper):
def __init__(self, name, *args, **kwargs):
super(Tag, self).__init__(unicode.strip(name), *args, **kwargs)
super(Tag, self).__init__(name.strip(), *args, **kwargs)
class Author(URLWrapper):
pass
@python_2_unicode_compatible
class StaticContent(object):
def __init__(self, src, dst=None, settings=None):
if not settings:
@ -309,9 +307,6 @@ class StaticContent(object):
self.save_as = os.path.join(settings['OUTPUT_PATH'], self.url)
def __str__(self):
return str(self.filepath.encode('utf-8', 'replace'))
def __unicode__(self):
return self.filepath
@ -319,7 +314,7 @@ def is_valid_content(content, f):
try:
content.check_properties()
return True
except NameError, e:
logger.error(u"Skipping %s: impossible to find informations about"
except NameError as e:
logger.error("Skipping %s: impossible to find informations about"
"'%s'" % (f, e))
return False

View file

@ -1,10 +1,11 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os
import math
import random
import logging
import datetime
import subprocess
import shutil
from codecs import open
@ -119,7 +120,7 @@ class Generator(object):
for item in items:
value = getattr(self, item)
if hasattr(value, 'items'):
value = value.items()
value = list(value.items())
self.context[item] = value
@ -133,8 +134,8 @@ class _FileLoader(BaseLoader):
if template != self.path or not os.path.exists(self.fullpath):
raise TemplateNotFound(template)
mtime = os.path.getmtime(self.fullpath)
with file(self.fullpath) as f:
source = f.read().decode('utf-8')
with open(self.fullpath, 'r', encoding='utf-8') as f:
source = f.read()
return source, self.fullpath, \
lambda: mtime == os.path.getmtime(self.fullpath)
@ -323,8 +324,8 @@ class ArticlesGenerator(Generator):
try:
signals.article_generate_preread.send(self)
content, metadata = read_file(f, settings=self.settings)
except Exception, e:
logger.warning(u'Could not process %s\n%s' % (f, str(e)))
except Exception as e:
logger.warning('Could not process %s\n%s' % (f, str(e)))
continue
# if no category is set, use the name of the path as a category
@ -333,8 +334,7 @@ class ArticlesGenerator(Generator):
if (self.settings['USE_FOLDER_AS_CATEGORY']
and os.path.dirname(f) != article_path):
# if the article is in a subdirectory
category = os.path.basename(os.path.dirname(f))\
.decode('utf-8')
category = os.path.basename(os.path.dirname(f))
else:
# if the article is not in a subdirectory
category = self.settings['DEFAULT_CATEGORY']
@ -366,8 +366,8 @@ class ArticlesGenerator(Generator):
elif article.status == "draft":
self.drafts.append(article)
else:
logger.warning(u"Unknown status %s for file %s, skipping it." %
(repr(unicode.encode(article.status, 'utf-8')),
logger.warning("Unknown status %s for file %s, skipping it." %
(repr(article.status),
repr(f)))
self.articles, self.translations = process_translations(all_articles)
@ -394,7 +394,7 @@ class ArticlesGenerator(Generator):
tag_cloud = sorted(tag_cloud.items(), key=itemgetter(1), reverse=True)
tag_cloud = tag_cloud[:self.settings.get('TAG_CLOUD_MAX_ITEMS')]
tags = map(itemgetter(1), tag_cloud)
tags = list(map(itemgetter(1), tag_cloud))
if tags:
max_count = max(tags)
steps = self.settings.get('TAG_CLOUD_STEPS')
@ -450,8 +450,8 @@ class PagesGenerator(Generator):
exclude=self.settings['PAGE_EXCLUDES']):
try:
content, metadata = read_file(f, settings=self.settings)
except Exception, e:
logger.warning(u'Could not process %s\n%s' % (f, str(e)))
except Exception as e:
logger.warning('Could not process %s\n%s' % (f, str(e)))
continue
signals.pages_generate_context.send(self, metadata=metadata)
page = Page(content, metadata, settings=self.settings,
@ -466,8 +466,8 @@ class PagesGenerator(Generator):
elif page.status == "hidden":
hidden_pages.append(page)
else:
logger.warning(u"Unknown status %s for file %s, skipping it." %
(repr(unicode.encode(page.status, 'utf-8')),
logger.warning("Unknown status %s for file %s, skipping it." %
(repr(page.status),
repr(f)))
self.pages, self.translations = process_translations(all_pages)
@ -550,7 +550,7 @@ class PdfGenerator(Generator):
# print "Generating pdf for", obj.filename, " in ", output_pdf
with open(obj.filename) as f:
self.pdfcreator.createPdf(text=f.read(), output=output_pdf)
logger.info(u' [ok] writing %s' % output_pdf)
logger.info(' [ok] writing %s' % output_pdf)
def generate_context(self):
pass
@ -558,7 +558,7 @@ class PdfGenerator(Generator):
def generate_output(self, writer=None):
# we don't use the writer passed as argument here
# since we write our own files
logger.info(u' Generating PDF files...')
logger.info(' Generating PDF files...')
pdf_path = os.path.join(self.output_path, 'pdf')
if not os.path.exists(pdf_path):
try:
@ -583,6 +583,6 @@ class SourceFileGenerator(Generator):
copy('', obj.filename, dest)
def generate_output(self, writer=None):
logger.info(u' Generating source files...')
logger.info(' Generating source files...')
for object in chain(self.context['articles'], self.context['pages']):
self._create_source(object, self.output_path)

View file

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
__all__ = [
'init'
]
@ -9,7 +12,7 @@ import logging
from logging import Formatter, getLogger, StreamHandler, DEBUG
RESET_TERM = u'\033[0;m'
RESET_TERM = '\033[0;m'
COLOR_CODES = {
'red': 31,
@ -24,37 +27,38 @@ COLOR_CODES = {
def ansi(color, text):
"""Wrap text in an ansi escape sequence"""
code = COLOR_CODES[color]
return u'\033[1;{0}m{1}{2}'.format(code, text, RESET_TERM)
return '\033[1;{0}m{1}{2}'.format(code, text, RESET_TERM)
class ANSIFormatter(Formatter):
"""
Convert a `logging.LogReport' object into colored text, using ANSI escape sequences.
Convert a `logging.LogRecord' object into colored text, using ANSI escape sequences.
"""
## colors:
def format(self, record):
if record.levelname is 'INFO':
return ansi('cyan', '-> ') + unicode(record.msg)
elif record.levelname is 'WARNING':
return ansi('yellow', record.levelname) + ': ' + unicode(record.msg)
elif record.levelname is 'ERROR':
return ansi('red', record.levelname) + ': ' + unicode(record.msg)
elif record.levelname is 'CRITICAL':
return ansi('bgred', record.levelname) + ': ' + unicode(record.msg)
elif record.levelname is 'DEBUG':
return ansi('bggrey', record.levelname) + ': ' + unicode(record.msg)
msg = str(record.msg)
if record.levelname == 'INFO':
return ansi('cyan', '-> ') + msg
elif record.levelname == 'WARNING':
return ansi('yellow', record.levelname) + ': ' + msg
elif record.levelname == 'ERROR':
return ansi('red', record.levelname) + ': ' + msg
elif record.levelname == 'CRITICAL':
return ansi('bgred', record.levelname) + ': ' + msg
elif record.levelname == 'DEBUG':
return ansi('bggrey', record.levelname) + ': ' + msg
else:
return ansi('white', record.levelname) + ': ' + unicode(record.msg)
return ansi('white', record.levelname) + ': ' + msg
class TextFormatter(Formatter):
"""
Convert a `logging.LogReport' object into text.
Convert a `logging.LogRecord' object into text.
"""
def format(self, record):
if not record.levelname or record.levelname is 'INFO':
if not record.levelname or record.levelname == 'INFO':
return record.msg
else:
return record.levelname + ': ' + record.msg

View file

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
# From django.core.paginator
from math import ceil
@ -37,7 +40,7 @@ class Paginator(object):
Returns a 1-based range of pages for iterating through within
a template for loop.
"""
return range(1, self.num_pages + 1)
return list(range(1, self.num_pages + 1))
page_range = property(_get_page_range)

View file

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Asset management plugin for Pelican
===================================

View file

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
"""
Copyright (c) Marco Milanesi <kpanic@gnufunk.org>

View file

@ -68,7 +68,7 @@ def create_gzip_file(filepath):
logger.debug('Compressing: %s' % filepath)
compressed = gzip.open(compressed_path, 'wb')
compressed.writelines(uncompressed)
except Exception, ex:
except Exception as ex:
logger.critical('Gzip compression failed: %s' % ex)
finally:
compressed.close()

View file

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from pelican import log
"""
HTML tags for reStructuredText
@ -52,7 +53,7 @@ class RawHtml(Directive):
has_content = True
def run(self):
html = u' '.join(self.content)
html = ' '.join(self.content)
node = nodes.raw('', html, format='html')
return [node]

View file

@ -1,7 +1,9 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from pelican import signals
def test(sender):
print "%s initialized !!" % sender
print("%s initialized !!" % sender)
def register():
signals.initialized.connect(test)

View file

@ -41,8 +41,8 @@ def add_related_posts(generator, metadata):
if len(related_posts) < 1:
return
relation_score = dict(zip(set(related_posts), map(related_posts.count,
set(related_posts))))
relation_score = dict(list(zip(set(related_posts), list(map(related_posts.count,
set(related_posts))))))
ranked_related = sorted(relation_score, key=relation_score.get)
metadata["related_posts"] = ranked_related[:5]

View file

@ -1,3 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import os.path
@ -7,19 +10,19 @@ from codecs import open
from pelican import signals, contents
TXT_HEADER = u"""{0}/index.html
TXT_HEADER = """{0}/index.html
{0}/archives.html
{0}/tags.html
{0}/categories.html
"""
XML_HEADER = u"""<?xml version="1.0" encoding="utf-8"?>
XML_HEADER = """<?xml version="1.0" encoding="utf-8"?>
<urlset xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd"
xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
"""
XML_URL = u"""
XML_URL = """
<url>
<loc>{0}/{1}</loc>
<lastmod>{2}</lastmod>
@ -28,7 +31,7 @@ XML_URL = u"""
</url>
"""
XML_FOOTER = u"""
XML_FOOTER = """
</urlset>
"""
@ -86,7 +89,7 @@ class SitemapGenerator(object):
'yearly', 'never')
if isinstance(pris, dict):
for k, v in pris.iteritems():
for k, v in pris.items():
if k in valid_keys and not isinstance(v, (int, float)):
default = self.priorities[k]
warning("sitemap plugin: priorities must be numbers")
@ -99,7 +102,7 @@ class SitemapGenerator(object):
warning("sitemap plugin: using the default values")
if isinstance(chfreqs, dict):
for k, v in chfreqs.iteritems():
for k, v in chfreqs.items():
if k in valid_keys and v not in valid_chfreqs:
default = self.changefreqs[k]
warning("sitemap plugin: invalid changefreq `{0}'".format(v))

View file

@ -1,4 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import six
import os
import re
try:
@ -20,15 +23,16 @@ try:
asciidoc = True
except ImportError:
asciidoc = False
import re
from pelican.contents import Category, Tag, Author
from pelican.utils import get_date, pelican_open
_METADATA_PROCESSORS = {
'tags': lambda x, y: [Tag(tag, y) for tag in unicode(x).split(',')],
'tags': lambda x, y: [Tag(tag, y) for tag in x.split(',')],
'date': lambda x, y: get_date(x),
'status': lambda x, y: unicode.strip(x),
'status': lambda x, y: x.strip(),
'category': Category,
'author': Author,
}
@ -242,7 +246,7 @@ def read_file(filename, fmt=None, settings=None):
if filename_metadata:
match = re.match(filename_metadata, base)
if match:
for k, v in match.groupdict().iteritems():
for k, v in match.groupdict().items():
if k not in metadata:
k = k.lower() # metadata must be lowercase
metadata[k] = reader.process_metadata(k, v)

View file

@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from docutils import nodes, utils
from docutils.parsers.rst import directives, roles, Directive
from pygments.formatters import HtmlFormatter
@ -32,7 +34,7 @@ class Pygments(Directive):
# take an arbitrary option if more than one is given
formatter = self.options and VARIANTS[self.options.keys()[0]] \
or DEFAULT
parsed = highlight(u'\n'.join(self.content), lexer, formatter)
parsed = highlight('\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
directives.register_directive('code-block', Pygments)

20
pelican/server.py Normal file
View file

@ -0,0 +1,20 @@
from __future__ import print_function
try:
import SimpleHTTPServer as srvmod
except ImportError:
import http.server as srvmod
try:
import SocketServer as socketserver
except ImportError:
import socketserver
PORT = 8000
Handler = srvmod.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", PORT), Handler)
print("serving at port", PORT)
httpd.serve_forever()

View file

@ -1,11 +1,13 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import six
import copy
import imp
import inspect
import os
import locale
import logging
import re
from os.path import isabs
@ -54,8 +56,8 @@ _DEFAULT_CONFIG = {'PATH': '.',
'CATEGORY_SAVE_AS': 'category/{slug}.html',
'TAG_URL': 'tag/{slug}.html',
'TAG_SAVE_AS': 'tag/{slug}.html',
'AUTHOR_URL': u'author/{slug}.html',
'AUTHOR_SAVE_AS': u'author/{slug}.html',
'AUTHOR_URL': 'author/{slug}.html',
'AUTHOR_SAVE_AS': 'author/{slug}.html',
'RELATIVE_URLS': True,
'DEFAULT_LANG': 'en',
'TAG_CLOUD_STEPS': 4,
@ -146,7 +148,7 @@ def configure_settings(settings):
# if locales is not a list, make it one
locales = settings['LOCALE']
if isinstance(locales, basestring):
if isinstance(locales, six.string_types):
locales = [locales]
# try to set the different locales, fallback on the default.
@ -155,7 +157,7 @@ def configure_settings(settings):
for locale_ in locales:
try:
locale.setlocale(locale.LC_ALL, locale_)
locale.setlocale(locale.LC_ALL, str(locale_))
break # break if it is successful
except locale.Error:
pass
@ -200,14 +202,14 @@ def configure_settings(settings):
"of the Webassets plugin")
if 'OUTPUT_SOURCES_EXTENSION' in settings:
if not isinstance(settings['OUTPUT_SOURCES_EXTENSION'], str):
if not isinstance(settings['OUTPUT_SOURCES_EXTENSION'], six.string_types):
settings['OUTPUT_SOURCES_EXTENSION'] = _DEFAULT_CONFIG['OUTPUT_SOURCES_EXTENSION']
logger.warn("Detected misconfiguration with OUTPUT_SOURCES_EXTENSION."
" falling back to the default extension " +
_DEFAULT_CONFIG['OUTPUT_SOURCES_EXTENSION'])
filename_metadata = settings.get('FILENAME_METADATA')
if filename_metadata and not isinstance(filename_metadata, basestring):
if filename_metadata and not isinstance(filename_metadata, six.string_types):
logger.error("Detected misconfiguration with FILENAME_METADATA"
" setting (must be string or compiled pattern), falling"
"back to the default")

View file

@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from blinker import signal
initialized = signal('pelican_initialized')

View file

@ -1,7 +1,12 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import argparse
from HTMLParser import HTMLParser
try:
from html.parser import HTMLParser
except ImportError:
from HTMLParser import HTMLParser
import os
import subprocess
import sys
@ -15,14 +20,14 @@ from pelican.utils import slugify
def wp2fields(xml):
"""Opens a wordpress XML file, and yield pelican fields"""
try:
from BeautifulSoup import BeautifulStoneSoup
from bs4 import BeautifulSoup
except ImportError:
error = ('Missing dependency '
'"BeautifulSoup" required to import Wordpress XML files.')
'"BeautifulSoup4" and "lxml" required to import Wordpress XML files.')
sys.exit(error)
xmlfile = open(xml, encoding='utf-8').read()
soup = BeautifulStoneSoup(xmlfile)
soup = BeautifulSoup(xmlfile, "xml")
items = soup.rss.channel.findAll('item')
for item in items:
@ -54,10 +59,10 @@ def wp2fields(xml):
def dc2fields(file):
"""Opens a Dotclear export file, and yield pelican fields"""
try:
from BeautifulSoup import BeautifulStoneSoup
from bs4 import BeautifulSoup
except ImportError:
error = ('Missing dependency '
'"BeautifulSoup" required to import Dotclear files.')
'"BeautifulSoup4" and "lxml" required to import Dotclear files.')
sys.exit(error)
@ -142,13 +147,27 @@ def dc2fields(file):
if len(tag) > 1:
if int(tag[:1]) == 1:
newtag = tag.split('"')[1]
tags.append(unicode(BeautifulStoneSoup(newtag,convertEntities=BeautifulStoneSoup.HTML_ENTITIES )))
tags.append(
BeautifulSoup(
newtag
, "xml"
)
# bs4 always outputs UTF-8
.decode('utf-8')
)
else:
i=1
j=1
while(i <= int(tag[:1])):
newtag = tag.split('"')[j].replace('\\','')
tags.append(unicode(BeautifulStoneSoup(newtag,convertEntities=BeautifulStoneSoup.HTML_ENTITIES )))
tags.append(
BeautifulSoup(
newtag
, "xml"
)
# bs4 always outputs UTF-8
.decode('utf-8')
)
i=i+1
if j < int(tag[:1])*2:
j=j+2
@ -244,7 +263,7 @@ def fields2pelican(fields, out_markup, output_path, dircat=False, strip_raw=Fals
# Replace newlines with paragraphs wrapped with <p> so
# HTML is valid before conversion
paragraphs = content.splitlines()
paragraphs = [u'<p>{0}</p>'.format(p) for p in paragraphs]
paragraphs = ['<p>{0}</p>'.format(p) for p in paragraphs]
new_content = ''.join(paragraphs)
fp.write(new_content)
@ -264,7 +283,7 @@ def fields2pelican(fields, out_markup, output_path, dircat=False, strip_raw=Fals
elif rc > 0:
error = "Please, check your Pandoc installation."
exit(error)
except OSError, e:
except OSError as e:
error = "Pandoc execution failed: %s" % e
exit(error)
@ -284,7 +303,7 @@ def fields2pelican(fields, out_markup, output_path, dircat=False, strip_raw=Fals
def main():
parser = argparse.ArgumentParser(
description="Transform feed, Wordpress or Dotclear files to reST (rst) "
"or Markdown (md) files. Be sure to have pandoc installed.",
"or Markdown (md) files. Be sure to have pandoc installed",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(dest='input', help='The input file to read')
@ -304,10 +323,10 @@ def main():
help="Strip raw HTML code that can't be converted to "
"markup such as flash embeds or iframes (wordpress import only)")
parser.add_argument('--disable-slugs', action='store_true',
dest='disable_slugs',
help='Disable storing slugs from imported posts within output. '
'With this disabled, your Pelican URLs may not be consistent '
'with your original posts.')
dest='disable_slugs',
help='Disable storing slugs from imported posts within output. '
'With this disabled, your Pelican URLs may not be consistent '
'with your original posts.')
args = parser.parse_args()
@ -339,4 +358,4 @@ def main():
fields2pelican(fields, args.markup, args.output,
dircat=args.dircat or False,
strip_raw=args.strip_raw or False,
disable_slugs=args.disable_slugs or False)
strip_slugs=args.disable_slugs or False)

View file

@ -1,5 +1,8 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import six
import os
import string
@ -29,11 +32,22 @@ CONF = {
'lang': 'en'
}
def _input_compat(prompt):
if six.PY3:
r = input(prompt)
else:
r = raw_input(prompt).decode('utf-8')
return r
if six.PY3:
str_compat = str
else:
str_compat = unicode
def decoding_strings(f):
def wrapper(*args, **kwargs):
out = f(*args, **kwargs)
if isinstance(out, basestring):
if isinstance(out, six.string_types):
# todo: make encoding configurable?
return out.decode(sys.stdin.encoding)
return out
@ -55,14 +69,14 @@ def get_template(name, as_encoding='utf-8'):
@decoding_strings
def ask(question, answer=str, default=None, l=None):
if answer == str:
def ask(question, answer=str_compat, default=None, l=None):
if answer == str_compat:
r = ''
while True:
if default:
r = raw_input('> {0} [{1}] '.format(question, default))
r = _input_compat('> {0} [{1}] '.format(question, default))
else:
r = raw_input('> {0} '.format(question, default))
r = _input_compat('> {0} '.format(question, default))
r = r.strip()
@ -84,11 +98,11 @@ def ask(question, answer=str, default=None, l=None):
r = None
while True:
if default is True:
r = raw_input('> {0} (Y/n) '.format(question))
r = _input_compat('> {0} (Y/n) '.format(question))
elif default is False:
r = raw_input('> {0} (y/N) '.format(question))
r = _input_compat('> {0} (y/N) '.format(question))
else:
r = raw_input('> {0} (y/n) '.format(question))
r = _input_compat('> {0} (y/n) '.format(question))
r = r.strip().lower()
@ -108,9 +122,9 @@ def ask(question, answer=str, default=None, l=None):
r = None
while True:
if default:
r = raw_input('> {0} [{1}] '.format(question, default))
r = _input_compat('> {0} [{1}] '.format(question, default))
else:
r = raw_input('> {0} '.format(question))
r = _input_compat('> {0} '.format(question))
r = r.strip()
@ -125,7 +139,7 @@ def ask(question, answer=str, default=None, l=None):
print('You must enter an integer')
return r
else:
raise NotImplemented('Argument `answer` must be str, bool, or integer')
raise NotImplemented('Argument `answer` must be str_compat, bool, or integer')
def main():
@ -158,14 +172,14 @@ needed by Pelican.
print('Using project associated with current virtual environment.'
'Will save to:\n%s\n' % CONF['basedir'])
else:
CONF['basedir'] = os.path.abspath(ask('Where do you want to create your new web site?', answer=str, default=args.path))
CONF['basedir'] = os.path.abspath(ask('Where do you want to create your new web site?', answer=str_compat, default=args.path))
CONF['sitename'] = ask('What will be the title of this web site?', answer=str, default=args.title)
CONF['author'] = ask('Who will be the author of this web site?', answer=str, default=args.author)
CONF['lang'] = ask('What will be the default language of this web site?', str, args.lang or CONF['lang'], 2)
CONF['sitename'] = ask('What will be the title of this web site?', answer=str_compat, default=args.title)
CONF['author'] = ask('Who will be the author of this web site?', answer=str_compat, default=args.author)
CONF['lang'] = ask('What will be the default language of this web site?', str_compat, args.lang or CONF['lang'], 2)
if ask('Do you want to specify a URL prefix? e.g., http://example.com ', answer=bool, default=True):
CONF['siteurl'] = ask('What is your URL prefix? (see above example; no trailing slash)', str, CONF['siteurl'])
CONF['siteurl'] = ask('What is your URL prefix? (see above example; no trailing slash)', str_compat, CONF['siteurl'])
CONF['with_pagination'] = ask('Do you want to enable article pagination?', bool, bool(CONF['default_pagination']))
@ -179,38 +193,38 @@ needed by Pelican.
if mkfile:
if ask('Do you want to upload your website using FTP?', answer=bool, default=False):
CONF['ftp_host'] = ask('What is the hostname of your FTP server?', str, CONF['ftp_host'])
CONF['ftp_user'] = ask('What is your username on that server?', str, CONF['ftp_user'])
CONF['ftp_target_dir'] = ask('Where do you want to put your web site on that server?', str, CONF['ftp_target_dir'])
CONF['ftp_host'] = ask('What is the hostname of your FTP server?', str_compat, CONF['ftp_host'])
CONF['ftp_user'] = ask('What is your username on that server?', str_compat, CONF['ftp_user'])
CONF['ftp_target_dir'] = ask('Where do you want to put your web site on that server?', str_compat, CONF['ftp_target_dir'])
if ask('Do you want to upload your website using SSH?', answer=bool, default=False):
CONF['ssh_host'] = ask('What is the hostname of your SSH server?', str, CONF['ssh_host'])
CONF['ssh_host'] = ask('What is the hostname of your SSH server?', str_compat, CONF['ssh_host'])
CONF['ssh_port'] = ask('What is the port of your SSH server?', int, CONF['ssh_port'])
CONF['ssh_user'] = ask('What is your username on that server?', str, CONF['ssh_user'])
CONF['ssh_target_dir'] = ask('Where do you want to put your web site on that server?', str, CONF['ssh_target_dir'])
CONF['ssh_user'] = ask('What is your username on that server?', str_compat, CONF['ssh_user'])
CONF['ssh_target_dir'] = ask('Where do you want to put your web site on that server?', str_compat, CONF['ssh_target_dir'])
if ask('Do you want to upload your website using Dropbox?', answer=bool, default=False):
CONF['dropbox_dir'] = ask('Where is your Dropbox directory?', str, CONF['dropbox_dir'])
CONF['dropbox_dir'] = ask('Where is your Dropbox directory?', str_compat, CONF['dropbox_dir'])
try:
os.makedirs(os.path.join(CONF['basedir'], 'content'))
except OSError, e:
except OSError as e:
print('Error: {0}'.format(e))
try:
os.makedirs(os.path.join(CONF['basedir'], 'output'))
except OSError, e:
except OSError as e:
print('Error: {0}'.format(e))
try:
with codecs.open(os.path.join(CONF['basedir'], 'pelicanconf.py'), 'w', 'utf-8') as fd:
conf_python = dict()
for key, value in CONF.iteritems():
for key, value in CONF.items():
conf_python[key] = repr(value)
for line in get_template('pelicanconf.py'):
template = string.Template(line)
fd.write(template.safe_substitute(conf_python))
fd.close()
except OSError, e:
except OSError as e:
print('Error: {0}'.format(e))
try:
@ -219,7 +233,7 @@ needed by Pelican.
template = string.Template(line)
fd.write(template.safe_substitute(CONF))
fd.close()
except OSError, e:
except OSError as e:
print('Error: {0}'.format(e))
if mkfile:
@ -229,13 +243,13 @@ needed by Pelican.
template = string.Template(line)
fd.write(template.safe_substitute(CONF))
fd.close()
except OSError, e:
except OSError as e:
print('Error: {0}'.format(e))
if develop:
conf_shell = dict()
for key, value in CONF.iteritems():
if isinstance(value, basestring) and ' ' in value:
for key, value in CONF.items():
if isinstance(value, six.string_types) and ' ' in value:
value = '"' + value.replace('"', '\\"') + '"'
conf_shell[key] = value
try:
@ -244,8 +258,8 @@ needed by Pelican.
template = string.Template(line)
fd.write(template.safe_substitute(conf_shell))
fd.close()
os.chmod((os.path.join(CONF['basedir'], 'develop_server.sh')), 0755)
except OSError, e:
os.chmod((os.path.join(CONF['basedir'], 'develop_server.sh')), 493) # mode 0o755
except OSError as e:
print('Error: {0}'.format(e))
print('Done. Your new project is available at %s' % CONF['basedir'])

View file

@ -1,5 +1,8 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import six
import argparse
import os
@ -28,7 +31,7 @@ _BUILTIN_THEMES = ['simple', 'notmyidea']
def err(msg, die=None):
"""Print an error message and exits if an exit code is given"""
sys.stderr.write(str(msg) + '\n')
sys.stderr.write(msg + '\n')
if die:
sys.exit((die if type(die) is int else 1))
@ -186,13 +189,13 @@ def install(path, v=False, u=False):
for root, dirs, files in os.walk(theme_path):
for d in dirs:
dname = os.path.join(root, d)
os.chmod(dname, 0755)
os.chmod(dname, 493) # 0o755
for f in files:
fname = os.path.join(root, f)
os.chmod(fname, 0644)
except OSError, e:
os.chmod(fname, 420) # 0o644
except OSError as e:
err("Cannot change permissions of files or directory in `{r}':\n{e}".format(r=theme_path, e=str(e)), die=False)
except Exception, e:
except Exception as e:
err("Cannot copy `{p}' to `{t}':\n{e}".format(p=path, t=theme_path, e=str(e)))
@ -212,7 +215,7 @@ def symlink(path, v=False):
print("Linking `{p}' to `{t}' ...".format(p=path, t=theme_path))
try:
os.symlink(path, theme_path)
except Exception, e:
except Exception as e:
err("Cannot link `{p}' to `{t}':\n{e}".format(p=path, t=theme_path, e=str(e)))
@ -233,7 +236,7 @@ def clean(v=False):
print('Removing {0}'.format(path))
try:
os.remove(path)
except OSError, e:
except OSError as e:
print('Error: cannot remove {0}'.format(path))
else:
c+=1

View file

@ -49,7 +49,7 @@ regenerate: clean
$$(PELICAN) -r $$(INPUTDIR) -o $$(OUTPUTDIR) -s $$(CONFFILE) $$(PELICANOPTS)
serve:
cd $$(OUTPUTDIR) && python -m SimpleHTTPServer
cd $$(OUTPUTDIR) && python -m pelican.server
devserver:
$$(BASEDIR)/develop_server.sh restart

View file

@ -20,7 +20,7 @@ PELICAN_PID=$$BASEDIR/pelican.pid
function usage(){
echo "usage: $$0 (stop) (start) (restart)"
echo "This starts pelican in debug and reload mode and then launches"
echo "A SimpleHTTP server to help site development. It doesn't read"
echo "A pelican.server to help site development. It doesn't read"
echo "your pelican options so you edit any paths in your Makefile"
echo "you will need to edit it as well"
exit 3
@ -31,14 +31,14 @@ function shut_down(){
PID=$$(cat $$SRV_PID)
PROCESS=$$(ps -p $$PID | tail -n 1 | awk '{print $$4}')
if [[ $$PROCESS != "" ]]; then
echo "Killing SimpleHTTPServer"
echo "Killing pelican.server"
kill $$PID
else
echo "Stale PID, deleting"
fi
rm $$SRV_PID
else
echo "SimpleHTTPServer PIDFile not found"
echo "pelican.server PIDFile not found"
fi
if [[ -f $$PELICAN_PID ]]; then
@ -57,15 +57,15 @@ function shut_down(){
}
function start_up(){
echo "Starting up Pelican and SimpleHTTPServer"
echo "Starting up Pelican and pelican.server"
shift
$$PELICAN --debug --autoreload -r $$INPUTDIR -o $$OUTPUTDIR -s $$CONFFILE $$PELICANOPTS &
echo $$! > $$PELICAN_PID
cd $$OUTPUTDIR
python -m SimpleHTTPServer &
python -m pelican.server &
echo $$! > $$SRV_PID
cd $$BASEDIR
sleep 1 && echo 'Pelican and SimpleHTTPServer processes now running in background.'
sleep 1 && echo 'Pelican and pelican.server processes now running in background.'
}
###

View file

@ -1,10 +1,14 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import six
import os
import re
import pytz
import shutil
import logging
import errno
import locale
from collections import defaultdict, Hashable
from functools import partial
@ -17,6 +21,77 @@ from operator import attrgetter
logger = logging.getLogger(__name__)
def strftime(date, date_format):
"""
Replacement for the builtin strftime().
This :func:`strftime()` is compatible to Python 2 and 3. In both cases,
input and output is always unicode.
Still, Python 3's :func:`strftime()` seems to somehow "normalize" unicode
chars in the format string. So if e.g. your format string contains 'ø' or
'ä', the result will be 'o' and 'a'.
See here for an `extensive testcase <https://github.com/dmdm/test_strftime>`_.
:param date: Any object that sports a :meth:`strftime()` method.
:param date_format: Format string, can always be unicode.
:returns: Unicode string with formatted date.
"""
# As tehkonst confirmed, above mentioned testcase runs correctly on
# Python 2 and 3 on Windows as well. Thanks.
if six.PY3:
# It could be so easy... *sigh*
return date.strftime(date_format)
# TODO Perhaps we should refactor again, so that the
# xmlcharrefreplace-regex-dance is always done, regardless
# of the Python version.
else:
# We must ensure that the format string is an encoded byte
# string, ASCII only WTF!!!
# But with "xmlcharrefreplace" our formatted date will produce
# *yuck* like this:
# "Øl trinken beim Besäufnis"
# --> "&#216;l trinken beim Bes&#228;ufnis"
date_format = date_format.encode('ascii',
errors="xmlcharrefreplace")
result = date.strftime(date_format)
# strftime() returns an encoded byte string
# which we must decode into unicode.
lang_code, enc = locale.getlocale(locale.LC_ALL)
if enc:
result = result.decode(enc)
else:
result = unicode(result)
# Convert XML character references back to unicode characters.
if "&#" in result:
result = re.sub(r'&#(?P<num>\d+);'
, lambda m: unichr(int(m.group('num')))
, result
)
return result
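# Usage sketch (illustrative, not part of this commit): callers can always
# pass a unicode format string, e.g.
#
#     import datetime
#     strftime(datetime.date(2013, 1, 11), '%d %B %Y')
#     # -> '11 January 2013' under an English locale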
#----------------------------------------------------------------------------
# Stolen from Django: django.utils.encoding
#
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if not six.PY3:
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
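# Usage sketch (illustrative, not part of this commit): a class defines only
# __str__ returning text and gets decorated; on Python 2 the decorator then
# supplies __unicode__ and rebinds __str__ to return UTF-8 bytes.
#
#     @python_2_unicode_compatible
#     class Greeting(object):
#         def __str__(self):
#             return 'héllo'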
#----------------------------------------------------------------------------
class NoFilesError(Exception):
pass
@ -78,14 +153,24 @@ def slugify(value):
Took from django sources.
"""
# TODO Maybe steal again from current Django 1.5dev
value = Markup(value).striptags()
if type(value) == unicode:
import unicodedata
from unidecode import unidecode
value = unicode(unidecode(value))
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
return re.sub('[-\s]+', '-', value)
# value must be unicode per se
import unicodedata
from unidecode import unidecode
# unidecode returns str in Py2 and 3, so in Py2 we have to make
# it unicode again
value = unidecode(value)
if isinstance(value, six.binary_type):
value = value.decode('ascii')
# still unicode
value = unicodedata.normalize('NFKD', value)
value = re.sub('[^\w\s-]', '', value).strip().lower()
value = re.sub('[-\s]+', '-', value)
# we want only ASCII chars
value = value.encode('ascii', 'ignore')
# but Pelican should generally use only unicode
return value.decode('ascii')
def copy(path, source, destination, destination_path=None, overwrite=False):
@ -137,7 +222,7 @@ def clean_output_dir(path):
if not os.path.isdir(path):
try:
os.remove(path)
except Exception, e:
except Exception as e:
logger.error("Unable to delete file %s; %e" % path, e)
return
@ -148,13 +233,13 @@ def clean_output_dir(path):
try:
shutil.rmtree(file)
logger.debug("Deleted directory %s" % file)
except Exception, e:
except Exception as e:
logger.error("Unable to delete directory %s; %e" % file, e)
elif os.path.isfile(file) or os.path.islink(file):
try:
os.remove(file)
logger.debug("Deleted file/link %s" % file)
except Exception, e:
except Exception as e:
logger.error("Unable to delete file %s; %e" % file, e)
else:
logger.error("Unable to delete %s, file type unknown" % file)
@ -180,7 +265,7 @@ def truncate_html_words(s, num, end_text='...'):
"""
length = int(num)
if length <= 0:
return u''
return ''
html4_singlets = ('br', 'col', 'link', 'base', 'img', 'param', 'area',
'hr', 'input')
@ -254,10 +339,10 @@ def process_translations(content_list):
for slug, items in grouped_by_slugs:
items = list(items)
# find items with default language
default_lang_items = filter(attrgetter('in_default_lang'), items)
default_lang_items = list(filter(attrgetter('in_default_lang'), items))
len_ = len(default_lang_items)
if len_ > 1:
logger.warning(u'there are %s variants of "%s"' % (len_, slug))
logger.warning('there are %s variants of "%s"' % (len_, slug))
for x in default_lang_items:
logger.warning(' %s' % x.filename)
elif len_ == 0:
@ -269,12 +354,9 @@ def process_translations(content_list):
+ 'content'
logger.warning(msg)
index.extend(default_lang_items)
translations.extend(filter(
lambda x: x not in default_lang_items,
items
))
translations.extend([x for x in items if x not in default_lang_items])
for a in items:
a.translations = filter(lambda x: x != a, items)
a.translations = [x for x in items if x != a]
return index, translations
@ -333,6 +415,6 @@ def set_date_tzinfo(d, tz_name=None):
def mkdir_p(path):
try:
os.makedirs(path)
except OSError, e:
except OSError as e:
if e.errno != errno.EEXIST:
raise

View file

@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import with_statement
from __future__ import with_statement, unicode_literals, print_function
import six
import os
import locale
@ -57,7 +58,7 @@ class Writer(object):
:param feed_type: the feed type to use (atom or rss)
"""
old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, 'C')
locale.setlocale(locale.LC_ALL, str('C'))
try:
self.site_url = context.get('SITEURL', get_relative_path(filename))
self.feed_domain = context.get('FEED_DOMAIN')
@ -68,7 +69,7 @@ class Writer(object):
max_items = len(elements)
if self.settings['FEED_MAX_ITEMS']:
max_items = min(self.settings['FEED_MAX_ITEMS'], max_items)
for i in xrange(max_items):
for i in range(max_items):
self._add_item_to_the_feed(feed, elements[i])
if filename:
@ -77,7 +78,7 @@ class Writer(object):
os.makedirs(os.path.dirname(complete_path))
except Exception:
pass
fp = open(complete_path, 'w')
fp = open(complete_path, 'w', encoding='utf-8' if six.PY3 else None)
feed.write(fp, 'utf-8')
logger.info('writing %s' % complete_path)
@ -108,7 +109,7 @@ class Writer(object):
def _write_file(template, localcontext, output_path, name):
"""Render the template write the file."""
old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, 'C')
locale.setlocale(locale.LC_ALL, str('C'))
try:
output = template.render(localcontext)
finally:
@ -120,7 +121,7 @@ class Writer(object):
pass
with open(filename, 'w', encoding='utf-8') as f:
f.write(output)
logger.info(u'writing %s' % filename)
logger.info('writing %s' % filename)
localcontext = context.copy()
if relative_urls:
@ -135,7 +136,7 @@ class Writer(object):
if paginated:
# pagination needed, init paginators
paginators = {}
for key in paginated.iterkeys():
for key in paginated.keys():
object_list = paginated[key]
if self.settings.get('DEFAULT_PAGINATION'):
@ -147,9 +148,9 @@ class Writer(object):
# generated pages, and write
name_root, ext = os.path.splitext(name)
for page_num in range(paginators.values()[0].num_pages):
for page_num in range(list(paginators.values())[0].num_pages):
paginated_localcontext = localcontext.copy()
for key in paginators.iterkeys():
for key in paginators.keys():
paginator = paginators[key]
page = paginator.page(page_num + 1)
paginated_localcontext.update(

View file

@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
AUTHOR = u'Alexis Métaireau'
SITENAME = u"Alexis' log"
from __future__ import unicode_literals
AUTHOR = 'Alexis Métaireau'
SITENAME = "Alexis' log"
SITEURL = 'http://blog.notmyidea.org'
TIMEZONE = "Europe/Paris"
@ -10,7 +12,7 @@ PDF_GENERATOR = False
REVERSE_CATEGORY_ORDER = True
LOCALE = "C"
DEFAULT_PAGINATION = 4
DEFAULT_DATE = (2012, 03, 02, 14, 01, 01)
DEFAULT_DATE = (2012, 3, 2, 14, 1, 1)
FEED_ALL_RSS = 'feeds/all.rss.xml'
CATEGORY_FEED_RSS = 'feeds/%s.rss.xml'
@ -19,7 +21,7 @@ LINKS = (('Biologeek', 'http://biologeek.org'),
('Filyb', "http://filyb.info/"),
('Libert-fr', "http://www.libert-fr.com"),
('N1k0', "http://prendreuncafe.com/blog/"),
(u'Tarek Ziadé', "http://ziade.org/blog"),
('Tarek Ziadé', "http://ziade.org/blog"),
('Zubin Mithra', "http://zubin71.wordpress.com/"),)
SOCIAL = (('twitter', 'http://twitter.com/ametaireau'),

View file

@ -1,8 +1,8 @@
#!/usr/bin/env python
from setuptools import setup
requires = ['feedgenerator', 'jinja2 >= 2.6', 'pygments', 'docutils', 'pytz',
'blinker', 'unidecode']
requires = ['feedgenerator>=1.5', 'jinja2 >= 2.6', 'pygments', 'docutils', 'pytz',
'blinker', 'unidecode', 'six']
try:
import argparse # NOQA

View file

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
AUTHOR = u'Alexis Métaireau'
SITENAME = u"Alexis' log"
from __future__ import unicode_literals, print_function
AUTHOR = 'Alexis Métaireau'
SITENAME = "Alexis' log"
SITEURL = 'http://blog.notmyidea.org'
TIMEZONE = 'UTC'
@ -18,7 +19,7 @@ LINKS = (('Biologeek', 'http://biologeek.org'),
('Filyb', "http://filyb.info/"),
('Libert-fr', "http://www.libert-fr.com"),
('N1k0', "http://prendreuncafe.com/blog/"),
(u'Tarek Ziadé', "http://ziade.org/blog"),
('Tarek Ziadé', "http://ziade.org/blog"),
('Zubin Mithra', "http://zubin71.wordpress.com/"),)
SOCIAL = (('twitter', 'http://twitter.com/ametaireau'),

View file

@ -1,13 +1,15 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
__all__ = [
'get_article',
'unittest',
]
import cStringIO
import os
import re
import subprocess
import sys
from six import StringIO
import logging
from logging.handlers import BufferingHandler
@ -101,7 +103,7 @@ def mute(returns_output=False):
def wrapper(*args, **kwargs):
saved_stdout = sys.stdout
sys.stdout = cStringIO.StringIO()
sys.stdout = StringIO()
try:
out = func(*args, **kwargs)

View file

@ -113,8 +113,8 @@ class TestPage(unittest.TestCase):
page = Page(**page_kwargs)
self.assertEqual(page.locale_date,
unicode(dt.strftime(_DEFAULT_CONFIG['DEFAULT_DATE_FORMAT']),
'utf-8'))
dt.strftime(_DEFAULT_CONFIG['DEFAULT_DATE_FORMAT']))
page_kwargs['settings'] = dict([(x, _DEFAULT_CONFIG[x]) for x in
_DEFAULT_CONFIG])
@ -131,7 +131,7 @@ class TestPage(unittest.TestCase):
import locale as locale_module
try:
page = Page(**page_kwargs)
self.assertEqual(page.locale_date, u'2015-09-13(\u65e5)')
self.assertEqual(page.locale_date, '2015-09-13(\u65e5)')
except locale_module.Error:
# The constructor of ``Page`` will try to set the locale to
# ``ja_JP.utf8``. But this attempt will failed when there is no

View file

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from mock import MagicMock
import os
@ -31,7 +32,7 @@ class TestArticlesGenerator(unittest.TestCase):
settings = get_settings()
settings['ARTICLE_DIR'] = 'content'
settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 01, 01)
settings['DEFAULT_DATE'] = (1970, 1, 1)
self.generator = ArticlesGenerator(settings.copy(), settings,
CUR_DIR, settings['THEME'], None,
settings['MARKUP'])
@ -70,18 +71,18 @@ class TestArticlesGenerator(unittest.TestCase):
generator = self.get_populated_generator()
articles = self.distill_articles(generator.articles)
articles_expected = [
[u'Article title', 'published', 'Default', 'article'],
[u'Article with markdown and summary metadata single', 'published', u'Default', 'article'],
[u'Article with markdown and summary metadata multi', 'published', u'Default', 'article'],
[u'Article with template', 'published', 'Default', 'custom'],
[u'Test md File', 'published', 'test', 'article'],
[u'Rst with filename metadata', 'published', u'yeah', 'article'],
[u'Test Markdown extensions', 'published', u'Default', 'article'],
[u'This is a super article !', 'published', 'Yeah', 'article'],
[u'This is an article with category !', 'published', 'yeah', 'article'],
[u'This is an article without category !', 'published', 'Default', 'article'],
[u'This is an article without category !', 'published', 'TestCategory', 'article'],
[u'This is a super article !', 'published', 'yeah', 'article']
['Article title', 'published', 'Default', 'article'],
['Article with markdown and summary metadata single', 'published', 'Default', 'article'],
['Article with markdown and summary metadata multi', 'published', 'Default', 'article'],
['Article with template', 'published', 'Default', 'custom'],
['Test md File', 'published', 'test', 'article'],
['Rst with filename metadata', 'published', 'yeah', 'article'],
['Test Markdown extensions', 'published', 'Default', 'article'],
['This is a super article !', 'published', 'Yeah', 'article'],
['This is an article with category !', 'published', 'yeah', 'article'],
['This is an article without category !', 'published', 'Default', 'article'],
['This is an article without category !', 'published', 'TestCategory', 'article'],
['This is a super article !', 'published', 'yeah', 'article']
]
self.assertItemsEqual(articles_expected, articles)
@ -97,7 +98,7 @@ class TestArticlesGenerator(unittest.TestCase):
settings = _DEFAULT_CONFIG.copy()
settings['ARTICLE_DIR'] = 'content'
settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 01, 01)
settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['USE_FOLDER_AS_CATEGORY'] = False
settings['filenames'] = {}
generator = ArticlesGenerator(settings.copy(), settings,
@ -179,7 +180,7 @@ class TestPageGenerator(unittest.TestCase):
def test_generate_context(self):
settings = get_settings()
settings['PAGE_DIR'] = 'TestPages'
settings['DEFAULT_DATE'] = (1970, 01, 01)
settings['DEFAULT_DATE'] = (1970, 1, 1)
generator = PagesGenerator(settings.copy(), settings, CUR_DIR,
settings['THEME'], None,
@ -189,14 +190,14 @@ class TestPageGenerator(unittest.TestCase):
hidden_pages = self.distill_pages(generator.hidden_pages)
pages_expected = [
[u'This is a test page', 'published', 'page'],
[u'This is a markdown test page', 'published', 'page'],
[u'This is a test page with a preset template', 'published', 'custom']
['This is a test page', 'published', 'page'],
['This is a markdown test page', 'published', 'page'],
['This is a test page with a preset template', 'published', 'custom']
]
hidden_pages_expected = [
[u'This is a test hidden page', 'hidden', 'page'],
[u'This is a markdown test hidden page', 'hidden', 'page'],
[u'This is a test hidden page with a custom template', 'hidden', 'custom']
['This is a test hidden page', 'hidden', 'page'],
['This is a markdown test hidden page', 'hidden', 'page'],
['This is a test hidden page with a custom template', 'hidden', 'custom']
]
self.assertItemsEqual(pages_expected,pages)

View file

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os
@ -9,7 +10,7 @@ CUR_DIR = os.path.dirname(__file__)
WORDPRESS_XML_SAMPLE = os.path.join(CUR_DIR, 'content', 'wordpressexport.xml')
try:
import BeautifulSoup
from bs4 import BeautifulSoup
except ImportError:
BeautifulSoup = False # NOQA
@ -48,26 +49,6 @@ class TestWordpressXmlImporter(unittest.TestCase):
strip_raw=True))
self.assertFalse(any('<iframe' in rst for rst in rst_files))
def test_can_toggle_slug_storage(self):
posts = list(self.posts)
r = lambda f: open(f).read()
silent_f2p = mute(True)(fields2pelican)
with temporary_folder() as temp:
rst_files = (r(f) for f in silent_f2p(posts, 'markdown', temp))
self.assertTrue(all('Slug:' in rst for rst in rst_files))
rst_files = (r(f) for f in silent_f2p(posts, 'markdown', temp,
disable_slugs=True))
self.assertFalse(any('Slug:' in rst for rst in rst_files))
rst_files = (r(f) for f in silent_f2p(posts, 'rst', temp))
self.assertTrue(all(':slug:' in rst for rst in rst_files))
rst_files = (r(f) for f in silent_f2p(posts, 'rst', temp,
disable_slugs=True))
self.assertFalse(any(':slug:' in rst for rst in rst_files))
def test_decode_html_entities_in_titles(self):
posts = list(self.posts)
test_posts = [post for post in posts if post[2] == 'html-entity-test']

View file

@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
try:
import unittest2 as unittest
except ImportError:
@ -34,7 +36,7 @@ def recursiveDiff(dcmp):
for f in dcmp.right_only],
}
for sub_dcmp in dcmp.subdirs.values():
for k, v in recursiveDiff(sub_dcmp).iteritems():
for k, v in recursiveDiff(sub_dcmp).items():
diff[k] += v
return diff
@ -48,7 +50,7 @@ class TestPelican(unittest.TestCase):
logging.getLogger().addHandler(self.logcount_handler)
self.temp_path = mkdtemp()
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, 'C')
locale.setlocale(locale.LC_ALL, str('C'))
def tearDown(self):
rmtree(self.temp_path)

View file

@ -6,7 +6,7 @@ import tempfile
from pelican.plugins import gzip_cache
from support import unittest, temporary_folder
from .support import unittest, temporary_folder
class TestGzipCache(unittest.TestCase):
'''Unit tests for the gzip cache plugin'''

View file

@ -1,4 +1,5 @@
# coding: utf-8
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import datetime
import os
@ -21,11 +22,11 @@ class RstReaderTest(unittest.TestCase):
content, metadata = reader.read(_filename('article_with_metadata.rst'))
expected = {
'category': 'yeah',
'author': u'Alexis Métaireau',
'author': 'Alexis Métaireau',
'title': 'This is a super article !',
'summary': u'<p class="first last">Multi-line metadata should be'\
u' supported\nas well as <strong>inline'\
u' markup</strong>.</p>\n',
'summary': '<p class="first last">Multi-line metadata should be'\
' supported\nas well as <strong>inline'\
' markup</strong>.</p>\n',
'date': datetime.datetime(2010, 12, 2, 10, 14),
'tags': ['foo', 'bar', 'foobar'],
'custom_field': 'http://notmyidea.org',
@ -40,7 +41,7 @@ class RstReaderTest(unittest.TestCase):
settings={})
expected = {
'category': 'yeah',
'author': u'Alexis Métaireau',
'author': 'Alexis Métaireau',
'title': 'Rst with filename metadata',
}
for key, value in metadata.items():
@ -53,7 +54,7 @@ class RstReaderTest(unittest.TestCase):
})
expected = {
'category': 'yeah',
'author': u'Alexis Métaireau',
'author': 'Alexis Métaireau',
'title': 'Rst with filename metadata',
'date': datetime.datetime(2012, 11, 29),
}
@ -69,7 +70,7 @@ class RstReaderTest(unittest.TestCase):
})
expected = {
'category': 'yeah',
'author': u'Alexis Métaireau',
'author': 'Alexis Métaireau',
'title': 'Rst with filename metadata',
'date': datetime.datetime(2012, 11, 29),
'slug': 'article_with_filename_metadata',
@ -101,7 +102,7 @@ class RstReaderTest(unittest.TestCase):
# otherwise, typogrify should be applied
content, _ = readers.read_file(_filename('article.rst'),
settings={'TYPOGRIFY': True})
expected = u"<p>This is some content. With some stuff to&nbsp;"\
expected = "<p>This is some content. With some stuff to&nbsp;"\
"&#8220;typogrify&#8221;.</p>\n<p>Now with added "\
'support for <abbr title="three letter acronym">'\
'<span class="caps">TLA</span></abbr>.</p>\n'
@ -168,7 +169,7 @@ class MdReaderTest(unittest.TestCase):
settings={})
expected = {
'category': 'yeah',
'author': u'Alexis Métaireau',
'author': 'Alexis Métaireau',
}
for key, value in expected.items():
self.assertEquals(value, metadata[key], key)
@ -180,7 +181,7 @@ class MdReaderTest(unittest.TestCase):
})
expected = {
'category': 'yeah',
'author': u'Alexis Métaireau',
'author': 'Alexis Métaireau',
'date': datetime.datetime(2012, 11, 30),
}
for key, value in expected.items():
@ -195,7 +196,7 @@ class MdReaderTest(unittest.TestCase):
})
expected = {
'category': 'yeah',
'author': u'Alexis Métaireau',
'author': 'Alexis Métaireau',
'date': datetime.datetime(2012, 11, 30),
'slug': 'md_w_filename_meta',
'mymeta': 'foo',
@ -203,20 +204,6 @@ class MdReaderTest(unittest.TestCase):
for key, value in expected.items():
self.assertEquals(value, metadata[key], key)
@unittest.skipUnless(readers.Markdown, "markdown isn't installed")
def test_article_with_summary_metadata(self):
reader = readers.MarkdownReader({})
content, metadata = reader.read(
_filename('article_with_markdown_and_summary_metadata_single.md'))
expected_summary = u'<p>A single-line summary should be supported'\
u' as well as <strong>inline markup</strong>.</p>'
self.assertEquals(expected_summary, metadata['summary'], 'summary')
content, metadata = reader.read(
_filename('article_with_markdown_and_summary_metadata_multi.md'))
expected_summary = u'<p>A multi-line summary should be supported'\
u'\nas well as <strong>inline markup</strong>.</p>'
self.assertEquals(expected_summary, metadata['summary'], 'summary')
class AdReaderTest(unittest.TestCase):
@unittest.skipUnless(readers.asciidoc, "asciidoc isn't installed")

View file

@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import copy
from os.path import dirname, abspath, join
@ -16,7 +18,7 @@ class TestSettingsConfiguration(unittest.TestCase):
self.settings = read_settings(default_conf)
def test_overwrite_existing_settings(self):
self.assertEqual(self.settings.get('SITENAME'), u"Alexis' log")
self.assertEqual(self.settings.get('SITENAME'), "Alexis' log")
self.assertEqual(self.settings.get('SITEURL'),
'http://blog.notmyidea.org')

View file

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import shutil
import os
import datetime
@ -41,10 +42,10 @@ class TestUtils(unittest.TestCase):
samples = (('this is a test', 'this-is-a-test'),
('this is a test', 'this-is-a-test'),
(u'this → is ← a ↑ test', 'this-is-a-test'),
('this → is ← a ↑ test', 'this-is-a-test'),
('this--is---a test', 'this-is-a-test'),
(u'unicode測試許功蓋你看到了嗎', 'unicodece-shi-xu-gong-gai-ni-kan-dao-liao-ma'),
(u'大飯原発4号機、18日夜起動へ', 'da-fan-yuan-fa-4hao-ji-18ri-ye-qi-dong-he'),)
('unicode測試許功蓋你看到了嗎', 'unicodece-shi-xu-gong-gai-ni-kan-dao-liao-ma'),
('大飯原発4号機、18日夜起動へ', 'da-fan-yuan-fa-4hao-ji-18ri-ye-qi-dong-he'),)
for value, expected in samples:
self.assertEquals(utils.slugify(value), expected)

View file

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
# from __future__ import unicode_literals
import hashlib
import os

65
tox.ini
View file

@ -1,15 +1,74 @@
# This tests the unified codebase (py26-py32) of Pelican.
# depends on some external libraries that aren't released yet.
#
# To run Pelican, you will already have checked out and installed them.
#
# Now we must tell tox about this package, otherwise tox would load the old
# libraries from PyPi.
#
# Run tox from the libraries source tree. It will save its package in
# the distshare directory from where the tests here will pick it up.
#
# Do that for
# https://github.com/dmdm/smartypants.git
#
# and typogrify:
# https://github.com/dmdm/typogrify/tree/py3k
#
# and webassets:
# https://github.com/dmdm/webassets/tree/py3k
#
#
# CAVEAT:
# -------
#
# 1/
# Please be aware that my ports of typogrify and webassets are just 2to3'd.
# They are not backwards compatible with Python 2.
#
# 2/
# Webassets still has unresolved issues, so I deactivated it for Py32 tests.
[tox]
envlist = py26,py27
envlist = py26,py27,py32
[testenv]
commands =
nosetests -s tests
unit2 discover []
nosetests -s tests
deps =
[testenv:py26]
deps =
nose
unittest2
mock
Markdown
BeautifulSoup
BeautifulSoup4
feedgenerator
typogrify
webassets
[testenv:py27]
deps =
nose
unittest2
mock
Markdown
BeautifulSoup4
feedgenerator
typogrify
webassets
[testenv:py32]
deps =
nose
unittest2py3k
mock
Markdown
BeautifulSoup4
feedgenerator
# {distshare}/smartypants-1.6.0.3.zip
# {distshare}/typogrify-2.0.0.zip
# {distshare}/webassets-0.8.dev.zip