diff --git a/.travis.yml b/.travis.yml
index 5d7d4a5f..1be196f2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,6 +3,7 @@ python:
- "2.7"
env:
- TOX_ENV=docs
+ - TOX_ENV=flake8
- TOX_ENV=py27
- TOX_ENV=py33
- TOX_ENV=py34
diff --git a/pelican/__init__.py b/pelican/__init__.py
index 1af14897..7fb8dfe4 100644
--- a/pelican/__init__.py
+++ b/pelican/__init__.py
@@ -1,45 +1,41 @@
# -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
-import six
+from __future__ import print_function, unicode_literals
+import argparse
+import collections
+import locale
+import logging
import os
import re
import sys
import time
-import logging
-import argparse
-import locale
-import collections
+
+import six
# pelican.log has to be the first pelican module to be loaded
# because logging.setLoggerClass has to be called before logging.getLogger
-from pelican.log import init
-
+from pelican.log import init # noqa
from pelican import signals
-
from pelican.generators import (ArticlesGenerator, PagesGenerator,
- StaticGenerator, SourceFileGenerator,
+ SourceFileGenerator, StaticGenerator,
TemplatePagesGenerator)
from pelican.readers import Readers
from pelican.settings import read_settings
-from pelican.utils import (clean_output_dir, folder_watcher,
- file_watcher, maybe_pluralize)
+from pelican.utils import (clean_output_dir, file_watcher,
+ folder_watcher, maybe_pluralize)
from pelican.writers import Writer
__version__ = "3.6.4.dev0"
-
DEFAULT_CONFIG_NAME = 'pelicanconf.py'
-
-
logger = logging.getLogger(__name__)
class Pelican(object):
def __init__(self, settings):
- """
- Pelican initialisation, performs some checks on the environment before
- doing anything else.
+ """Pelican initialisation
+
+ Performs some checks on the environment before doing anything else.
"""
# define the default settings
@@ -152,7 +148,7 @@ class Pelican(object):
context = self.settings.copy()
# Share these among all the generators and content objects:
context['filenames'] = {} # maps source path to Content object or None
- context['localsiteurl'] = self.settings['SITEURL']
+ context['localsiteurl'] = self.settings['SITEURL']
generators = [
cls(
@@ -190,23 +186,23 @@ class Pelican(object):
if isinstance(g, PagesGenerator))
pluralized_articles = maybe_pluralize(
- len(articles_generator.articles) +
- len(articles_generator.translations),
+ (len(articles_generator.articles) +
+ len(articles_generator.translations)),
'article',
'articles')
pluralized_drafts = maybe_pluralize(
- len(articles_generator.drafts) +
- len(articles_generator.drafts_translations),
+ (len(articles_generator.drafts) +
+ len(articles_generator.drafts_translations)),
'draft',
'drafts')
pluralized_pages = maybe_pluralize(
- len(pages_generator.pages) +
- len(pages_generator.translations),
+ (len(pages_generator.pages) +
+ len(pages_generator.translations)),
'page',
'pages')
pluralized_hidden_pages = maybe_pluralize(
- len(pages_generator.hidden_pages) +
- len(pages_generator.hidden_translations),
+ (len(pages_generator.hidden_pages) +
+ len(pages_generator.hidden_translations)),
'hidden page',
'hidden pages')
@@ -243,8 +239,8 @@ class Pelican(object):
return generators
def get_writer(self):
- writers = [ w for (_, w) in signals.get_writer.send(self)
- if isinstance(w, type) ]
+ writers = [w for (_, w) in signals.get_writer.send(self)
+ if isinstance(w, type)]
writers_found = len(writers)
if writers_found == 0:
return Writer(self.output_path, settings=self.settings)
@@ -254,15 +250,15 @@ class Pelican(object):
logger.debug('Found writer: %s', writer)
else:
logger.warning(
- '%s writers found, using only first one: %s',
+ '%s writers found, using only first one: %s',
writers_found, writer)
return writer(self.output_path, settings=self.settings)
def parse_arguments():
parser = argparse.ArgumentParser(
- description="""A tool to generate a static blog,
- with restructured text input files.""",
+ description='A tool to generate a static blog, '
+ ' with restructured text input files.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
@@ -354,7 +350,7 @@ def get_config(args):
# argparse returns bytes in Py2. There is no definite answer as to which
# encoding argparse (or sys.argv) uses.
# "Best" option seems to be locale.getpreferredencoding()
- # ref: http://mail.python.org/pipermail/python-list/2006-October/405766.html
+ # http://mail.python.org/pipermail/python-list/2006-October/405766.html
if not six.PY3:
enc = locale.getpreferredencoding()
for key in config:
@@ -424,7 +420,8 @@ def main():
# Added static paths
# Add new watchers and set them as modified
- for static_path in set(new_static).difference(old_static):
+ new_watchers = set(new_static).difference(old_static)
+ for static_path in new_watchers:
static_key = '[static]%s' % static_path
watchers[static_key] = folder_watcher(
os.path.join(pelican.path, static_path),
@@ -434,7 +431,8 @@ def main():
# Removed static paths
# Remove watchers and modified values
- for static_path in set(old_static).difference(new_static):
+ old_watchers = set(old_static).difference(new_static)
+ for static_path in old_watchers:
static_key = '[static]%s' % static_path
watchers.pop(static_key)
modified.pop(static_key)
diff --git a/pelican/cache.py b/pelican/cache.py
index d955ae08..e6c10cb9 100644
--- a/pelican/cache.py
+++ b/pelican/cache.py
@@ -1,16 +1,14 @@
+# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import hashlib
import logging
import os
-try:
- import cPickle as pickle
-except:
- import pickle
+
+from six.moves import cPickle as pickle
from pelican.utils import mkdir_p
-
logger = logging.getLogger(__name__)
@@ -83,6 +81,7 @@ class FileStampDataCacher(FileDataCacher):
"""This sublcass additionally sets filestamp function
and base path for filestamping operations
"""
+
super(FileStampDataCacher, self).__init__(settings, cache_name,
caching_policy,
load_policy)
@@ -118,6 +117,7 @@ class FileStampDataCacher(FileDataCacher):
a hash for a function name in the hashlib module
or an empty bytes string otherwise
"""
+
try:
return self._filestamp_func(filename)
except (IOError, OSError, TypeError) as err:
@@ -133,6 +133,7 @@ class FileStampDataCacher(FileDataCacher):
Modification is checked by comparing the cached
and current file stamp.
"""
+
stamp, data = super(FileStampDataCacher, self).get_cached_data(
filename, (None, default))
if stamp != self._get_file_stamp(filename):
diff --git a/pelican/contents.py b/pelican/contents.py
index a6b8cc5f..16d1f074 100644
--- a/pelican/contents.py
+++ b/pelican/contents.py
@@ -1,23 +1,24 @@
# -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
-import six
-from six.moves.urllib.parse import urlparse, urlunparse
+from __future__ import print_function, unicode_literals
import copy
import locale
import logging
-import functools
import os
import re
import sys
import pytz
+import six
+from six.moves.urllib.parse import urlparse, urlunparse
+
from pelican import signals
from pelican.settings import DEFAULT_CONFIG
-from pelican.utils import (slugify, truncate_html_words, memoized, strftime,
- python_2_unicode_compatible, deprecated_attribute,
- path_to_url, posixize_path, set_date_tzinfo, SafeDatetime)
+from pelican.utils import (SafeDatetime, deprecated_attribute, memoized,
+ path_to_url, posixize_path,
+ python_2_unicode_compatible, set_date_tzinfo,
+ slugify, strftime, truncate_html_words)
# Import these so that they're avalaible when you import from pelican.contents.
from pelican.urlwrappers import (URLWrapper, Author, Category, Tag) # NOQA
@@ -66,7 +67,7 @@ class Content(object):
# also keep track of the metadata attributes available
self.metadata = local_metadata
- #default template if it's not defined in page
+ # default template if it's not defined in page
self.template = self._get_template()
# First, read the authors from "authors", if not, fallback to "author"
@@ -94,13 +95,16 @@ class Content(object):
# create the slug if not existing, generate slug according to
# setting of SLUG_ATTRIBUTE
if not hasattr(self, 'slug'):
- if settings['SLUGIFY_SOURCE'] == 'title' and hasattr(self, 'title'):
+ if (settings['SLUGIFY_SOURCE'] == 'title' and
+ hasattr(self, 'title')):
self.slug = slugify(self.title,
- settings.get('SLUG_SUBSTITUTIONS', ()))
- elif settings['SLUGIFY_SOURCE'] == 'basename' and source_path != None:
- basename = os.path.basename(os.path.splitext(source_path)[0])
- self.slug = slugify(basename,
- settings.get('SLUG_SUBSTITUTIONS', ()))
+ settings.get('SLUG_SUBSTITUTIONS', ()))
+ elif (settings['SLUGIFY_SOURCE'] == 'basename' and
+ source_path is not None):
+ basename = os.path.basename(
+ os.path.splitext(source_path)[0])
+ self.slug = slugify(
+ basename, settings.get('SLUG_SUBSTITUTIONS', ()))
self.source_path = source_path
@@ -233,7 +237,8 @@ class Content(object):
if isinstance(linked_content, Static):
linked_content.attach_to(self)
else:
- logger.warning("%s used {attach} link syntax on a "
+ logger.warning(
+ "%s used {attach} link syntax on a "
"non-static file. Use {filename} instead.",
self.get_relative_source_path())
origin = '/'.join((siteurl, linked_content.url))
@@ -241,7 +246,7 @@ class Content(object):
else:
logger.warning(
"Unable to find `%s`, skipping url replacement.",
- value.geturl(), extra = {
+ value.geturl(), extra={
'limit_msg': ("Other resources were not found "
"and their urls not replaced")})
elif what == 'category':
@@ -250,9 +255,9 @@ class Content(object):
origin = '/'.join((siteurl, Tag(path, self.settings).url))
else:
logger.warning(
- "Replacement Indicator '%s' not recognized, "
- "skipping replacement",
- what)
+ "Replacement Indicator '%s' not recognized, "
+ "skipping replacement",
+ what)
# keep all other parts, such as query, fragment, etc.
parts = list(value)
@@ -337,7 +342,9 @@ class Content(object):
return posixize_path(
os.path.relpath(
- os.path.abspath(os.path.join(self.settings['PATH'], source_path)),
+ os.path.abspath(os.path.join(
+ self.settings['PATH'],
+ source_path)),
os.path.abspath(self.settings['PATH'])
))
@@ -402,9 +409,12 @@ class Static(Page):
def attach_to(self, content):
"""Override our output directory with that of the given content object.
"""
- # Determine our file's new output path relative to the linking document.
- # If it currently lives beneath the linking document's source directory,
- # preserve that relationship on output. Otherwise, make it a sibling.
+
+ # Determine our file's new output path relative to the linking
+ # document. If it currently lives beneath the linking
+ # document's source directory, preserve that relationship on output.
+ # Otherwise, make it a sibling.
+
linking_source_dir = os.path.dirname(content.source_path)
tail_path = os.path.relpath(self.source_path, linking_source_dir)
if tail_path.startswith(os.pardir + os.sep):
@@ -420,11 +430,14 @@ class Static(Page):
# 'some/content' with a file named 'index.html'.) Rather than trying
# to figure it out by comparing the linking document's url and save_as
# path, we simply build our new url from our new save_as path.
+
new_url = path_to_url(new_save_as)
def _log_reason(reason):
- logger.warning("The {attach} link in %s cannot relocate %s "
- "because %s. Falling back to {filename} link behavior instead.",
+ logger.warning(
+ "The {attach} link in %s cannot relocate "
+ "%s because %s. Falling back to "
+ "{filename} link behavior instead.",
content.get_relative_source_path(),
self.get_relative_source_path(), reason,
extra={'limit_msg': "More {attach} warnings silenced."})
@@ -452,5 +465,6 @@ def is_valid_content(content, f):
content.check_properties()
return True
except NameError as e:
- logger.error("Skipping %s: could not find information about '%s'", f, e)
+ logger.error(
+ "Skipping %s: could not find information about '%s'", f, e)
return False
diff --git a/pelican/generators.py b/pelican/generators.py
index da651252..ff9a9d7c 100644
--- a/pelican/generators.py
+++ b/pelican/generators.py
@@ -1,28 +1,28 @@
# -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals
-import os
-import six
-import logging
-import shutil
-import fnmatch
import calendar
-
+import fnmatch
+import logging
+import os
+import shutil
from codecs import open
from collections import defaultdict
from functools import partial
from itertools import chain, groupby
from operator import attrgetter
-from jinja2 import (Environment, FileSystemLoader, PrefixLoader, ChoiceLoader,
- BaseLoader, TemplateNotFound)
+from jinja2 import (BaseLoader, ChoiceLoader, Environment, FileSystemLoader,
+ PrefixLoader, TemplateNotFound)
+import six
+
+from pelican import signals
from pelican.cache import FileStampDataCacher
from pelican.contents import Article, Draft, Page, Static, is_valid_content
from pelican.readers import Readers
-from pelican.utils import (copy, process_translations, mkdir_p, DateFormatter,
- python_2_unicode_compatible, posixize_path)
-from pelican import signals
+from pelican.utils import (DateFormatter, copy, mkdir_p, posixize_path,
+ process_translations, python_2_unicode_compatible)
logger = logging.getLogger(__name__)
@@ -31,6 +31,7 @@ logger = logging.getLogger(__name__)
class PelicanTemplateNotFound(Exception):
pass
+
@python_2_unicode_compatible
class Generator(object):
"""Baseclass generator"""
@@ -90,8 +91,9 @@ class Generator(object):
try:
self._templates[name] = self.env.get_template(name + '.html')
except TemplateNotFound:
- raise PelicanTemplateNotFound('[templates] unable to load %s.html from %s'
- % (name, self._templates_path))
+ raise PelicanTemplateNotFound(
+ '[templates] unable to load {}.html from {}'.format(
+ name, self._templates_path))
return self._templates[name]
def _include_path(self, path, extensions=None):
@@ -105,7 +107,7 @@ class Generator(object):
extensions = tuple(self.readers.extensions)
basename = os.path.basename(path)
- #check IGNORE_FILES
+ # check IGNORE_FILES
ignores = self.settings['IGNORE_FILES']
if any(fnmatch.fnmatch(basename, ignore) for ignore in ignores):
return False
@@ -122,8 +124,9 @@ class Generator(object):
:param extensions: the list of allowed extensions (if False, all
extensions are allowed)
"""
+ # backward compatibility for older generators
if isinstance(paths, six.string_types):
- paths = [paths] # backward compatibility for older generators
+ paths = [paths]
# group the exclude dir names by parent path, for use with os.walk()
exclusions_by_dirpath = {}
@@ -138,7 +141,8 @@ class Generator(object):
root = os.path.join(self.path, path) if path else self.path
if os.path.isdir(root):
- for dirpath, dirs, temp_files in os.walk(root, followlinks=True):
+ for dirpath, dirs, temp_files in os.walk(
+ root, followlinks=True):
drop = []
excl = exclusions_by_dirpath.get(dirpath, ())
for d in dirs:
@@ -178,7 +182,8 @@ class Generator(object):
before this method is called, even if they failed to process.)
The path argument is expected to be relative to self.path.
"""
- return posixize_path(os.path.normpath(path)) in self.context['filenames']
+ return (posixize_path(os.path.normpath(path))
+ in self.context['filenames'])
def _update_context(self, items):
"""Update the context with the given items from the currrent
@@ -211,7 +216,8 @@ class CachingGenerator(Generator, FileStampDataCacher):
readers_cache_name=(cls_name + '-Readers'),
**kwargs)
- cache_this_level = self.settings['CONTENT_CACHING_LAYER'] == 'generator'
+ cache_this_level = \
+ self.settings['CONTENT_CACHING_LAYER'] == 'generator'
caching_policy = cache_this_level and self.settings['CACHE_CONTENT']
load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE']
FileStampDataCacher.__init__(self, self.settings, cls_name,
@@ -259,14 +265,14 @@ class ArticlesGenerator(CachingGenerator):
def __init__(self, *args, **kwargs):
"""initialize properties"""
- self.articles = [] # only articles in default language
+ self.articles = [] # only articles in default language
self.translations = []
self.dates = {}
self.tags = defaultdict(list)
self.categories = defaultdict(list)
self.related_posts = []
self.authors = defaultdict(list)
- self.drafts = [] # only drafts in default language
+ self.drafts = [] # only drafts in default language
self.drafts_translations = []
super(ArticlesGenerator, self).__init__(*args, **kwargs)
signals.article_generator_init.send(self)
@@ -282,8 +288,8 @@ class ArticlesGenerator(CachingGenerator):
writer.write_feed(self.articles, self.context,
self.settings['FEED_RSS'], feed_type='rss')
- if (self.settings.get('FEED_ALL_ATOM')
- or self.settings.get('FEED_ALL_RSS')):
+ if (self.settings.get('FEED_ALL_ATOM') or
+ self.settings.get('FEED_ALL_RSS')):
all_articles = list(self.articles)
for article in self.articles:
all_articles.extend(article.translations)
@@ -322,8 +328,8 @@ class ArticlesGenerator(CachingGenerator):
self.settings['AUTHOR_FEED_RSS']
% auth.slug, feed_type='rss')
- if (self.settings.get('TAG_FEED_ATOM')
- or self.settings.get('TAG_FEED_RSS')):
+ if (self.settings.get('TAG_FEED_ATOM') or
+ self.settings.get('TAG_FEED_RSS')):
for tag, arts in self.tags.items():
arts.sort(key=attrgetter('date'), reverse=True)
if self.settings.get('TAG_FEED_ATOM'):
@@ -336,8 +342,8 @@ class ArticlesGenerator(CachingGenerator):
self.settings['TAG_FEED_RSS'] % tag.slug,
feed_type='rss')
- if (self.settings.get('TRANSLATION_FEED_ATOM')
- or self.settings.get('TRANSLATION_FEED_RSS')):
+ if (self.settings.get('TRANSLATION_FEED_ATOM') or
+ self.settings.get('TRANSLATION_FEED_RSS')):
translations_feeds = defaultdict(list)
for article in chain(self.articles, self.translations):
translations_feeds[article.lang].append(article)
@@ -472,9 +478,9 @@ class ArticlesGenerator(CachingGenerator):
"""Generate drafts pages."""
for draft in chain(self.drafts_translations, self.drafts):
write(draft.save_as, self.get_template(draft.template),
- self.context, article=draft, category=draft.category,
- override_output=hasattr(draft, 'override_save_as'),
- blog=True, all_articles=self.articles)
+ self.context, article=draft, category=draft.category,
+ override_output=hasattr(draft, 'override_save_as'),
+ blog=True, all_articles=self.articles)
def generate_pages(self, writer):
"""Generate the pages on the disk"""
@@ -503,7 +509,8 @@ class ArticlesGenerator(CachingGenerator):
exclude=self.settings['ARTICLE_EXCLUDES']):
article_or_draft = self.get_cached_data(f, None)
if article_or_draft is None:
- #TODO needs overhaul, maybe nomad for read_file solution, unified behaviour
+ # TODO needs overhaul, maybe nomad for read_file
+ # solution, unified behaviour
try:
article_or_draft = self.readers.read_file(
base_path=self.path, path=f, content_class=Article,
@@ -513,7 +520,8 @@ class ArticlesGenerator(CachingGenerator):
context_signal=signals.article_generator_context,
context_sender=self)
except Exception as e:
- logger.error('Could not process %s\n%s', f, e,
+ logger.error(
+ 'Could not process %s\n%s', f, e,
exc_info=self.settings.get('DEBUG', False))
self._add_failed_source_path(f)
continue
@@ -535,8 +543,9 @@ class ArticlesGenerator(CachingGenerator):
self.add_source_path(article_or_draft)
all_drafts.append(article_or_draft)
else:
- logger.error("Unknown status '%s' for file %s, skipping it.",
- article_or_draft.status, f)
+ logger.error(
+ "Unknown status '%s' for file %s, skipping it.",
+ article_or_draft.status, f)
self._add_failed_source_path(f)
continue
@@ -544,9 +553,9 @@ class ArticlesGenerator(CachingGenerator):
self.add_source_path(article_or_draft)
-
- self.articles, self.translations = process_translations(all_articles,
- order_by=self.settings['ARTICLE_ORDER_BY'])
+ self.articles, self.translations = process_translations(
+ all_articles,
+ order_by=self.settings['ARTICLE_ORDER_BY'])
self.drafts, self.drafts_translations = \
process_translations(all_drafts)
@@ -615,7 +624,8 @@ class PagesGenerator(CachingGenerator):
context_signal=signals.page_generator_context,
context_sender=self)
except Exception as e:
- logger.error('Could not process %s\n%s', f, e,
+ logger.error(
+ 'Could not process %s\n%s', f, e,
exc_info=self.settings.get('DEBUG', False))
self._add_failed_source_path(f)
continue
@@ -629,8 +639,9 @@ class PagesGenerator(CachingGenerator):
elif page.status.lower() == "hidden":
hidden_pages.append(page)
else:
- logger.error("Unknown status '%s' for file %s, skipping it.",
- page.status, f)
+ logger.error(
+ "Unknown status '%s' for file %s, skipping it.",
+ page.status, f)
self._add_failed_source_path(f)
continue
@@ -638,10 +649,11 @@ class PagesGenerator(CachingGenerator):
self.add_source_path(page)
- self.pages, self.translations = process_translations(all_pages,
- order_by=self.settings['PAGE_ORDER_BY'])
- self.hidden_pages, self.hidden_translations = (
- process_translations(hidden_pages))
+ self.pages, self.translations = process_translations(
+ all_pages,
+ order_by=self.settings['PAGE_ORDER_BY'])
+ self.hidden_pages, self.hidden_translations = \
+ process_translations(hidden_pages)
self._update_context(('pages', 'hidden_pages'))
diff --git a/pelican/log.py b/pelican/log.py
index c83c5810..0f4b795b 100644
--- a/pelican/log.py
+++ b/pelican/log.py
@@ -1,18 +1,18 @@
# -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals
+
+import locale
+import logging
+import os
+import sys
+from collections import Mapping, defaultdict
+
+import six
__all__ = [
'init'
]
-import os
-import sys
-import logging
-import locale
-
-from collections import defaultdict, Mapping
-
-import six
class BaseFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
@@ -20,7 +20,8 @@ class BaseFormatter(logging.Formatter):
super(BaseFormatter, self).__init__(fmt=FORMAT, datefmt=datefmt)
def format(self, record):
- record.__dict__['customlevelname'] = self._get_levelname(record.levelname)
+ customlevel = self._get_levelname(record.levelname)
+ record.__dict__['customlevelname'] = customlevel
# format multiline messages 'nicely' to make it clear they are together
record.msg = record.msg.replace('\n', '\n | ')
return super(BaseFormatter, self).format(record)
@@ -132,13 +133,13 @@ class SafeLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
# if the only argument is a Mapping, Logger uses that for formatting
# format values for that case
- if args and len(args)==1 and isinstance(args[0], Mapping):
+ if args and len(args) == 1 and isinstance(args[0], Mapping):
args = ({k: self._decode_arg(v) for k, v in args[0].items()},)
# otherwise, format each arg
else:
args = tuple(self._decode_arg(arg) for arg in args)
- super(SafeLogger, self)._log(level, msg, args,
- exc_info=exc_info, extra=extra)
+ super(SafeLogger, self)._log(
+ level, msg, args, exc_info=exc_info, extra=extra)
def _decode_arg(self, arg):
'''
@@ -175,8 +176,7 @@ def init(level=None, handler=logging.StreamHandler()):
logger = logging.getLogger()
- if (os.isatty(sys.stdout.fileno())
- and not sys.platform.startswith('win')):
+ if os.isatty(sys.stdout.fileno()) and not sys.platform.startswith('win'):
fmt = ANSIFormatter()
else:
fmt = TextFormatter()
diff --git a/pelican/paginator.py b/pelican/paginator.py
index 0189ec91..9aca550b 100644
--- a/pelican/paginator.py
+++ b/pelican/paginator.py
@@ -1,18 +1,15 @@
# -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
-import six
+from __future__ import print_function, unicode_literals
-# From django.core.paginator
-from collections import namedtuple
import functools
import logging
import os
-
+from collections import namedtuple
from math import ceil
+import six
+
logger = logging.getLogger(__name__)
-
-
PaginationRule = namedtuple(
'PaginationRule',
'min_page URL SAVE_AS',
@@ -143,7 +140,7 @@ class Page(object):
'settings': self.settings,
'base_name': os.path.dirname(self.name),
'number_sep': '/',
- 'extension': self.extension,
+ 'extension': self.extension,
}
if self.number == 1:
diff --git a/pelican/readers.py b/pelican/readers.py
index c1c8dbfa..bc4515e7 100644
--- a/pelican/readers.py
+++ b/pelican/readers.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals
import logging
import os
@@ -9,24 +9,50 @@ import docutils
import docutils.core
import docutils.io
from docutils.writers.html4css1 import HTMLTranslator
-import six
-# import the directives to have pygments support
+import six
+from six.moves.html_parser import HTMLParser
+
from pelican import rstdirectives # NOQA
+from pelican import signals
+from pelican.cache import FileStampDataCacher
+from pelican.contents import Author, Category, Page, Tag
+from pelican.utils import SafeDatetime, get_date, pelican_open, posixize_path
+
try:
from markdown import Markdown
except ImportError:
Markdown = False # NOQA
+
try:
from html import escape
except ImportError:
from cgi import escape
-from six.moves.html_parser import HTMLParser
-from pelican import signals
-from pelican.cache import FileStampDataCacher
-from pelican.contents import Page, Category, Tag, Author
-from pelican.utils import get_date, pelican_open, SafeDatetime, posixize_path
+# Metadata processors have no way to discard an unwanted value, so we have
+# them return this value instead to signal that it should be discarded later.
+# This means that _filter_discardable_metadata() must be called on processed
+# metadata dicts before use, to remove the items with the special value.
+_DISCARD = object()
+METADATA_PROCESSORS = {
+ 'tags': lambda x, y: ([
+ Tag(tag, y)
+ for tag in ensure_metadata_list(x)
+ ] or _DISCARD),
+ 'date': lambda x, y: get_date(x.replace('_', ' ')),
+ 'modified': lambda x, y: get_date(x),
+ 'status': lambda x, y: x.strip() or _DISCARD,
+ 'category': lambda x, y: _process_if_nonempty(Category, x, y),
+ 'author': lambda x, y: _process_if_nonempty(Author, x, y),
+ 'authors': lambda x, y: ([
+ Author(author, y)
+ for author in ensure_metadata_list(x)
+ ] or _DISCARD),
+ 'slug': lambda x, y: x.strip() or _DISCARD,
+}
+
+logger = logging.getLogger(__name__)
+
def ensure_metadata_list(text):
"""Canonicalize the format of a list of authors or tags. This works
@@ -49,13 +75,6 @@ def ensure_metadata_list(text):
return [v for v in (w.strip() for w in text) if v]
-# Metadata processors have no way to discard an unwanted value, so we have
-# them return this value instead to signal that it should be discarded later.
-# This means that _filter_discardable_metadata() must be called on processed
-# metadata dicts before use, to remove the items with the special value.
-_DISCARD = object()
-
-
def _process_if_nonempty(processor, name, settings):
"""Removes extra whitespace from name and applies a metadata processor.
If name is empty or all whitespace, returns _DISCARD instead.
@@ -64,28 +83,11 @@ def _process_if_nonempty(processor, name, settings):
return processor(name, settings) if name else _DISCARD
-METADATA_PROCESSORS = {
- 'tags': lambda x, y: ([Tag(tag, y) for tag in ensure_metadata_list(x)]
- or _DISCARD),
- 'date': lambda x, y: get_date(x.replace('_', ' ')),
- 'modified': lambda x, y: get_date(x),
- 'status': lambda x, y: x.strip() or _DISCARD,
- 'category': lambda x, y: _process_if_nonempty(Category, x, y),
- 'author': lambda x, y: _process_if_nonempty(Author, x, y),
- 'authors': lambda x, y: ([Author(author, y)
- for author in ensure_metadata_list(x)]
- or _DISCARD),
- 'slug': lambda x, y: x.strip() or _DISCARD,
-}
-
-
def _filter_discardable_metadata(metadata):
"""Return a copy of a dict, minus any items marked as discardable."""
return {name: val for name, val in metadata.items() if val is not _DISCARD}
-logger = logging.getLogger(__name__)
-
class BaseReader(object):
"""Base class to read files.
@@ -267,8 +269,10 @@ class MarkdownReader(BaseReader):
output[name] = self.process_metadata(name, summary)
elif name in METADATA_PROCESSORS:
if len(value) > 1:
- logger.warning('Duplicate definition of `%s` '
- 'for %s. Using first one.', name, self._source_path)
+ logger.warning(
+ 'Duplicate definition of `%s` '
+ 'for %s. Using first one.',
+ name, self._source_path)
output[name] = self.process_metadata(name, value[0])
elif len(value) > 1:
# handle list metadata as list of string
@@ -380,7 +384,8 @@ class HTMLReader(BaseReader):
def _handle_meta_tag(self, attrs):
name = self._attr_value(attrs, 'name')
if name is None:
- attr_serialized = ', '.join(['{}="{}"'.format(k, v) for k, v in attrs])
+ attr_list = ['{}="{}"'.format(k, v) for k, v in attrs]
+ attr_serialized = ', '.join(attr_list)
logger.warning("Meta tag in file %s does not have a 'name' "
"attribute, skipping. Attributes: %s",
self._filename, attr_serialized)
@@ -394,9 +399,9 @@ class HTMLReader(BaseReader):
"Meta tag attribute 'contents' used in file %s, should"
" be changed to 'content'",
self._filename,
- extra={'limit_msg': ("Other files have meta tag "
- "attribute 'contents' that should "
- "be changed to 'content'")})
+ extra={'limit_msg': "Other files have meta tag "
+ "attribute 'contents' that should "
+ "be changed to 'content'"})
if name == 'keywords':
name = 'tags'
@@ -474,7 +479,8 @@ class Readers(FileStampDataCacher):
path = os.path.abspath(os.path.join(base_path, path))
source_path = posixize_path(os.path.relpath(path, base_path))
- logger.debug('Read file %s -> %s',
+ logger.debug(
+ 'Read file %s -> %s',
source_path, content_class.__name__)
if not fmt:
@@ -486,7 +492,8 @@ class Readers(FileStampDataCacher):
'Pelican does not know how to parse %s', path)
if preread_signal:
- logger.debug('Signal %s.send(%s)',
+ logger.debug(
+ 'Signal %s.send(%s)',
preread_signal.name, preread_sender)
preread_signal.send(preread_sender)
@@ -527,7 +534,9 @@ class Readers(FileStampDataCacher):
def typogrify_wrapper(text):
"""Ensures ignore_tags feature is backward compatible"""
try:
- return typogrify(text, self.settings['TYPOGRIFY_IGNORE_TAGS'])
+ return typogrify(
+ text,
+ self.settings['TYPOGRIFY_IGNORE_TAGS'])
except TypeError:
return typogrify(text)
@@ -539,8 +548,10 @@ class Readers(FileStampDataCacher):
metadata['summary'] = typogrify_wrapper(metadata['summary'])
if context_signal:
-            logger.debug('Signal %s.send(%s, <metadata>)',
-                         context_signal.name, context_sender)
+            logger.debug(
+                'Signal %s.send(%s, <metadata>)',
+                context_signal.name, context_sender)
             context_signal.send(context_sender, metadata=metadata)
diff --git a/pelican/tests/test_contents.py b/pelican/tests/test_contents.py
--- a/pelican/tests/test_contents.py
+++ b/pelican/tests/test_contents.py
@@ -49,7 +50,7 @@ class TestPage(unittest.TestCase):
# them to initialise object's attributes.
metadata = {'foo': 'bar', 'foobar': 'baz', 'title': 'foobar', }
page = Page(TEST_CONTENT, metadata=metadata,
- context={'localsiteurl': ''})
+ context={'localsiteurl': ''})
for key, value in metadata.items():
self.assertTrue(hasattr(page, key))
self.assertEqual(value, getattr(page, key))
@@ -139,14 +140,9 @@ class TestPage(unittest.TestCase):
page = Page(**page_kwargs)
# page.locale_date is a unicode string in both python2 and python3
- dt_date = dt.strftime(DEFAULT_CONFIG['DEFAULT_DATE_FORMAT'])
- # dt_date is a byte string in python2, and a unicode string in python3
- # Let's make sure it is a unicode string (relies on python 3.3 supporting the u prefix)
- if type(dt_date) != type(u''):
- # python2:
- dt_date = unicode(dt_date, 'utf8')
+ dt_date = dt.strftime(DEFAULT_CONFIG['DEFAULT_DATE_FORMAT'])
- self.assertEqual(page.locale_date, dt_date )
+ self.assertEqual(page.locale_date, dt_date)
page_kwargs['settings'] = get_settings()
# I doubt this can work on all platforms ...
@@ -307,10 +303,14 @@ class TestPage(unittest.TestCase):
args['settings'] = get_settings()
args['source_path'] = 'content'
args['context']['filenames'] = {
- 'images/poster.jpg': type(cls_name, (object,), {'url': 'images/poster.jpg'}),
- 'assets/video.mp4': type(cls_name, (object,), {'url': 'assets/video.mp4'}),
- 'images/graph.svg': type(cls_name, (object,), {'url': 'images/graph.svg'}),
- 'reference.rst': type(cls_name, (object,), {'url': 'reference.html'}),
+ 'images/poster.jpg': type(
+ cls_name, (object,), {'url': 'images/poster.jpg'}),
+ 'assets/video.mp4': type(
+ cls_name, (object,), {'url': 'assets/video.mp4'}),
+ 'images/graph.svg': type(
+ cls_name, (object,), {'url': 'images/graph.svg'}),
+ 'reference.rst': type(
+ cls_name, (object,), {'url': 'reference.html'}),
}
# video.poster
@@ -325,20 +325,25 @@ class TestPage(unittest.TestCase):
             content,
             'There is a video with poster '
             '<video controls poster="http://notmyidea.org/images/poster.jpg">'
-            '<source src="http://notmyidea.org/assets/video.mp4" type="video/mp4">'
+            '<source src="http://notmyidea.org/assets/video.mp4"'
+            ' type="video/mp4">'
             '</video>'
         )

         # object.data
         args['content'] = (
             'There is a svg object '
-            '<object data="{filename}/images/graph.svg" type="image/svg+xml"></object>'
+            '<object data="{filename}/images/graph.svg"'
+            ' type="image/svg+xml"></object>'
         )
         content = Page(**args).get_content('http://notmyidea.org')
         self.assertEqual(
             content,
             'There is a svg object '
-            '<object data="http://notmyidea.org/images/graph.svg" type="image/svg+xml"></object>'
+            '<object data="http://notmyidea.org/images/graph.svg"'
+            ' type="image/svg+xml"></object>'
         )
# blockquote.cite
@@ -350,7 +355,9 @@ class TestPage(unittest.TestCase):
         self.assertEqual(
             content,
             'There is a blockquote with cite attribute '
-            '<blockquote cite="http://notmyidea.org/reference.html">blah blah</blockquote>'
+            '<blockquote cite="http://notmyidea.org/reference.html">'
+            'blah blah'
+            '</blockquote>'
         )
def test_intrasite_link_markdown_spaces(self):
@@ -401,17 +408,19 @@ class TestArticle(TestPage):
def test_slugify_category_author(self):
settings = get_settings()
- settings['SLUG_SUBSTITUTIONS'] = [ ('C#', 'csharp') ]
+ settings['SLUG_SUBSTITUTIONS'] = [('C#', 'csharp')]
settings['ARTICLE_URL'] = '{author}/{category}/{slug}/'
settings['ARTICLE_SAVE_AS'] = '{author}/{category}/{slug}/index.html'
article_kwargs = self._copy_page_kwargs()
article_kwargs['metadata']['author'] = Author("O'Brien", settings)
- article_kwargs['metadata']['category'] = Category('C# & stuff', settings)
+ article_kwargs['metadata']['category'] = Category(
+ 'C# & stuff', settings)
article_kwargs['metadata']['title'] = 'fnord'
article_kwargs['settings'] = settings
article = Article(**article_kwargs)
self.assertEqual(article.url, 'obrien/csharp-stuff/fnord/')
- self.assertEqual(article.save_as, 'obrien/csharp-stuff/fnord/index.html')
+ self.assertEqual(
+ article.save_as, 'obrien/csharp-stuff/fnord/index.html')
class TestStatic(LoggedTestCase):
@@ -426,7 +435,8 @@ class TestStatic(LoggedTestCase):
self.context = self.settings.copy()
self.static = Static(content=None, metadata={}, settings=self.settings,
- source_path=posix_join('dir', 'foo.jpg'), context=self.context)
+ source_path=posix_join('dir', 'foo.jpg'),
+ context=self.context)
self.context['filenames'] = {self.static.source_path: self.static}
@@ -436,8 +446,10 @@ class TestStatic(LoggedTestCase):
def test_attach_to_same_dir(self):
"""attach_to() overrides a static file's save_as and url.
"""
- page = Page(content="fake page",
- metadata={'title': 'fakepage'}, settings=self.settings,
+ page = Page(
+ content="fake page",
+ metadata={'title': 'fakepage'},
+ settings=self.settings,
source_path=os.path.join('dir', 'fakepage.md'))
self.static.attach_to(page)
@@ -449,7 +461,7 @@ class TestStatic(LoggedTestCase):
"""attach_to() preserves dirs inside the linking document dir.
"""
page = Page(content="fake page", metadata={'title': 'fakepage'},
- settings=self.settings, source_path='fakepage.md')
+ settings=self.settings, source_path='fakepage.md')
self.static.attach_to(page)
expected_save_as = os.path.join('outpages', 'dir', 'foo.jpg')
@@ -460,8 +472,8 @@ class TestStatic(LoggedTestCase):
"""attach_to() ignores dirs outside the linking document dir.
"""
page = Page(content="fake page",
- metadata={'title': 'fakepage'}, settings=self.settings,
- source_path=os.path.join('dir', 'otherdir', 'fakepage.md'))
+ metadata={'title': 'fakepage'}, settings=self.settings,
+ source_path=os.path.join('dir', 'otherdir', 'fakepage.md'))
self.static.attach_to(page)
expected_save_as = os.path.join('outpages', 'foo.jpg')
@@ -472,8 +484,8 @@ class TestStatic(LoggedTestCase):
"""attach_to() does nothing when called a second time.
"""
page = Page(content="fake page",
- metadata={'title': 'fakepage'}, settings=self.settings,
- source_path=os.path.join('dir', 'fakepage.md'))
+ metadata={'title': 'fakepage'}, settings=self.settings,
+ source_path=os.path.join('dir', 'fakepage.md'))
self.static.attach_to(page)
@@ -481,8 +493,10 @@ class TestStatic(LoggedTestCase):
otherdir_settings.update(dict(
PAGE_SAVE_AS=os.path.join('otherpages', '{slug}.html'),
PAGE_URL='otherpages/{slug}.html'))
- otherdir_page = Page(content="other page",
- metadata={'title': 'otherpage'}, settings=otherdir_settings,
+ otherdir_page = Page(
+ content="other page",
+ metadata={'title': 'otherpage'},
+ settings=otherdir_settings,
source_path=os.path.join('dir', 'otherpage.md'))
self.static.attach_to(otherdir_page)
@@ -497,8 +511,10 @@ class TestStatic(LoggedTestCase):
"""
original_save_as = self.static.save_as
- page = Page(content="fake page",
- metadata={'title': 'fakepage'}, settings=self.settings,
+ page = Page(
+ content="fake page",
+ metadata={'title': 'fakepage'},
+ settings=self.settings,
source_path=os.path.join('dir', 'fakepage.md'))
self.static.attach_to(page)
@@ -511,8 +527,10 @@ class TestStatic(LoggedTestCase):
"""
original_url = self.static.url
- page = Page(content="fake page",
- metadata={'title': 'fakepage'}, settings=self.settings,
+ page = Page(
+ content="fake page",
+ metadata={'title': 'fakepage'},
+ settings=self.settings,
source_path=os.path.join('dir', 'fakepage.md'))
self.static.attach_to(page)
@@ -523,13 +541,15 @@ class TestStatic(LoggedTestCase):
"""attach_to() does not override paths that were overridden elsewhere.
(For example, by the user with EXTRA_PATH_METADATA)
"""
- customstatic = Static(content=None,
+ customstatic = Static(
+ content=None,
metadata=dict(save_as='customfoo.jpg', url='customfoo.jpg'),
settings=self.settings,
source_path=os.path.join('dir', 'foo.jpg'),
context=self.settings.copy())
- page = Page(content="fake page",
+ page = Page(
+ content="fake page",
metadata={'title': 'fakepage'}, settings=self.settings,
source_path=os.path.join('dir', 'fakepage.md'))
@@ -542,13 +562,16 @@ class TestStatic(LoggedTestCase):
"""{attach} link syntax triggers output path override & url replacement.
"""
         html = '<a href="{attach}../foo.jpg">link</a>'
- page = Page(content=html,
- metadata={'title': 'fakepage'}, settings=self.settings,
+ page = Page(
+ content=html,
+ metadata={'title': 'fakepage'},
+ settings=self.settings,
source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
context=self.context)
content = page.get_content('')
- self.assertNotEqual(content, html,
+ self.assertNotEqual(
+ content, html,
"{attach} link syntax did not trigger URL replacement.")
expected_save_as = os.path.join('outpages', 'foo.jpg')
@@ -561,7 +584,8 @@ class TestStatic(LoggedTestCase):
         html = '<a href="{tag}foo">link</a>'
page = Page(
content=html,
- metadata={'title': 'fakepage'}, settings=self.settings,
+ metadata={'title': 'fakepage'},
+ settings=self.settings,
source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
context=self.context)
content = page.get_content('')
@@ -572,8 +596,10 @@ class TestStatic(LoggedTestCase):
"{category} link syntax triggers url replacement."
         html = '<a href="{category}foo">link</a>'
- page = Page(content=html,
- metadata={'title': 'fakepage'}, settings=self.settings,
+ page = Page(
+ content=html,
+ metadata={'title': 'fakepage'},
+ settings=self.settings,
source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
context=self.context)
content = page.get_content('')
@@ -588,11 +614,11 @@ class TestStatic(LoggedTestCase):
metadata={'title': 'fakepage'}, settings=self.settings,
source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
context=self.context)
- content = page.get_content('')
+ content = page.get_content('')
self.assertEqual(content, html)
self.assertLogCountEqual(
- count=1,
- msg="Replacement Indicator 'unknown' not recognized, "
- "skipping replacement",
- level=logging.WARNING)
+ count=1,
+ msg="Replacement Indicator 'unknown' not recognized, "
+ "skipping replacement",
+ level=logging.WARNING)
diff --git a/pelican/tests/test_generators.py b/pelican/tests/test_generators.py
index c424b60f..2cfca04f 100644
--- a/pelican/tests/test_generators.py
+++ b/pelican/tests/test_generators.py
@@ -1,8 +1,18 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
+import locale
import os
+
from codecs import open
+from shutil import rmtree
+from tempfile import mkdtemp
+
+from pelican.generators import (ArticlesGenerator, Generator, PagesGenerator,
+ StaticGenerator, TemplatePagesGenerator)
+from pelican.tests.support import get_settings, unittest
+from pelican.writers import Writer
+
try:
from unittest.mock import MagicMock
except ImportError:
@@ -10,14 +20,7 @@ except ImportError:
from mock import MagicMock
except ImportError:
MagicMock = False
-from shutil import rmtree
-from tempfile import mkdtemp
-from pelican.generators import (Generator, ArticlesGenerator, PagesGenerator,
- StaticGenerator, TemplatePagesGenerator)
-from pelican.writers import Writer
-from pelican.tests.support import unittest, get_settings
-import locale
CUR_DIR = os.path.dirname(__file__)
CONTENT_DIR = os.path.join(CUR_DIR, 'content')
@@ -35,7 +38,6 @@ class TestGenerator(unittest.TestCase):
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.old_locale)
-
def test_include_path(self):
self.settings['IGNORE_FILES'] = {'ignored1.rst', 'ignored2.rst'}
@@ -52,7 +54,8 @@ class TestGenerator(unittest.TestCase):
"""Test that Generator.get_files() properly excludes directories.
"""
# We use our own Generator so we can give it our own content path
- generator = Generator(context=self.settings.copy(),
+ generator = Generator(
+ context=self.settings.copy(),
settings=self.settings,
path=os.path.join(CUR_DIR, 'nested_content'),
theme=self.settings['THEME'], output_path=None)
@@ -60,34 +63,42 @@ class TestGenerator(unittest.TestCase):
filepaths = generator.get_files(paths=['maindir'])
found_files = {os.path.basename(f) for f in filepaths}
expected_files = {'maindir.md', 'subdir.md'}
- self.assertFalse(expected_files - found_files,
+ self.assertFalse(
+ expected_files - found_files,
"get_files() failed to find one or more files")
# Test string as `paths` argument rather than list
filepaths = generator.get_files(paths='maindir')
found_files = {os.path.basename(f) for f in filepaths}
expected_files = {'maindir.md', 'subdir.md'}
- self.assertFalse(expected_files - found_files,
+ self.assertFalse(
+ expected_files - found_files,
"get_files() failed to find one or more files")
filepaths = generator.get_files(paths=[''], exclude=['maindir'])
found_files = {os.path.basename(f) for f in filepaths}
- self.assertNotIn('maindir.md', found_files,
+ self.assertNotIn(
+ 'maindir.md', found_files,
"get_files() failed to exclude a top-level directory")
- self.assertNotIn('subdir.md', found_files,
+ self.assertNotIn(
+ 'subdir.md', found_files,
"get_files() failed to exclude a subdir of an excluded directory")
- filepaths = generator.get_files(paths=[''],
+ filepaths = generator.get_files(
+ paths=[''],
exclude=[os.path.join('maindir', 'subdir')])
found_files = {os.path.basename(f) for f in filepaths}
- self.assertNotIn('subdir.md', found_files,
+ self.assertNotIn(
+ 'subdir.md', found_files,
"get_files() failed to exclude a subdirectory")
filepaths = generator.get_files(paths=[''], exclude=['subdir'])
found_files = {os.path.basename(f) for f in filepaths}
- self.assertIn('subdir.md', found_files,
+ self.assertIn(
+ 'subdir.md', found_files,
"get_files() excluded a subdirectory by name, ignoring its path")
+
class TestArticlesGenerator(unittest.TestCase):
@classmethod
@@ -96,7 +107,7 @@ class TestArticlesGenerator(unittest.TestCase):
settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['READERS'] = {'asc': None}
- settings['CACHE_CONTENT'] = False # cache not needed for this logic tests
+ settings['CACHE_CONTENT'] = False
cls.generator = ArticlesGenerator(
context=settings.copy(), settings=settings,
@@ -152,25 +163,30 @@ class TestArticlesGenerator(unittest.TestCase):
['Test mkd File', 'published', 'test', 'article'],
['This is a super article !', 'published', 'Yeah', 'article'],
['This is a super article !', 'published', 'Yeah', 'article'],
- ['Article with Nonconformant HTML meta tags', 'published', 'Default', 'article'],
+ ['Article with Nonconformant HTML meta tags', 'published',
+ 'Default', 'article'],
['This is a super article !', 'published', 'yeah', 'article'],
['This is a super article !', 'published', 'yeah', 'article'],
['This is a super article !', 'published', 'yeah', 'article'],
['This is a super article !', 'published', 'Default', 'article'],
['This is an article with category !', 'published', 'yeah',
'article'],
- ['This is an article with multiple authors!', 'published', 'Default', 'article'],
- ['This is an article with multiple authors!', 'published', 'Default', 'article'],
- ['This is an article with multiple authors in list format!', 'published', 'Default', 'article'],
- ['This is an article with multiple authors in lastname, firstname format!', 'published', 'Default', 'article'],
+ ['This is an article with multiple authors!', 'published',
+ 'Default', 'article'],
+ ['This is an article with multiple authors!', 'published',
+ 'Default', 'article'],
+ ['This is an article with multiple authors in list format!',
+ 'published', 'Default', 'article'],
+ ['This is an article with multiple authors in lastname, '
+ 'firstname format!', 'published', 'Default', 'article'],
['This is an article without category !', 'published', 'Default',
- 'article'],
+ 'article'],
['This is an article without category !', 'published',
'TestCategory', 'article'],
['An Article With Code Block To Test Typogrify Ignore',
- 'published', 'Default', 'article'],
- ['マックOS X 10.8でパイソンとVirtualenvをインストールと設定', 'published',
- '指導書', 'article'],
+ 'published', 'Default', 'article'],
+ ['マックOS X 10.8でパイソンとVirtualenvをインストールと設定',
+ 'published', '指導書', 'article'],
]
self.assertEqual(sorted(articles_expected), sorted(self.articles))
@@ -292,7 +308,7 @@ class TestArticlesGenerator(unittest.TestCase):
generator.generate_period_archives(write)
dates = [d for d in generator.dates if d.date.year == 1970]
self.assertEqual(len(dates), 1)
- #among other things it must have at least been called with this
+ # among other things it must have at least been called with this
settings["period"] = (1970,)
write.assert_called_with("posts/1970/index.html",
generator.get_template("period_archives"),
@@ -300,37 +316,42 @@ class TestArticlesGenerator(unittest.TestCase):
blog=True, dates=dates)
del settings["period"]
- settings['MONTH_ARCHIVE_SAVE_AS'] = 'posts/{date:%Y}/{date:%b}/index.html'
+ settings['MONTH_ARCHIVE_SAVE_AS'] = \
+ 'posts/{date:%Y}/{date:%b}/index.html'
generator = ArticlesGenerator(
context=settings, settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
write = MagicMock()
generator.generate_period_archives(write)
- dates = [d for d in generator.dates if d.date.year == 1970
- and d.date.month == 1]
+ dates = [d for d in generator.dates
+ if d.date.year == 1970 and d.date.month == 1]
self.assertEqual(len(dates), 1)
settings["period"] = (1970, "January")
- #among other things it must have at least been called with this
+ # among other things it must have at least been called with this
write.assert_called_with("posts/1970/Jan/index.html",
generator.get_template("period_archives"),
settings,
blog=True, dates=dates)
del settings["period"]
- settings['DAY_ARCHIVE_SAVE_AS'] = 'posts/{date:%Y}/{date:%b}/{date:%d}/index.html'
+ settings['DAY_ARCHIVE_SAVE_AS'] = \
+ 'posts/{date:%Y}/{date:%b}/{date:%d}/index.html'
generator = ArticlesGenerator(
context=settings, settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
write = MagicMock()
generator.generate_period_archives(write)
- dates = [d for d in generator.dates if d.date.year == 1970
- and d.date.month == 1
- and d.date.day == 1]
+ dates = [
+ d for d in generator.dates if
+ d.date.year == 1970 and
+ d.date.month == 1 and
+ d.date.day == 1
+ ]
self.assertEqual(len(dates), 1)
settings["period"] = (1970, "January", 1)
- #among other things it must have at least been called with this
+ # among other things it must have at least been called with this
write.assert_called_with("posts/1970/Jan/01/index.html",
generator.get_template("period_archives"),
settings,
@@ -347,11 +368,14 @@ class TestArticlesGenerator(unittest.TestCase):
def test_generate_authors(self):
"""Check authors generation."""
authors = [author.name for author, _ in self.generator.authors]
- authors_expected = sorted(['Alexis Métaireau', 'Author, First', 'Author, Second', 'First Author', 'Second Author'])
+ authors_expected = sorted(
+ ['Alexis Métaireau', 'Author, First', 'Author, Second',
+ 'First Author', 'Second Author'])
self.assertEqual(sorted(authors), authors_expected)
# test for slug
authors = [author.slug for author, _ in self.generator.authors]
- authors_expected = ['alexis-metaireau', 'author-first', 'author-second', 'first-author', 'second-author']
+ authors_expected = ['alexis-metaireau', 'author-first',
+ 'author-second', 'first-author', 'second-author']
self.assertEqual(sorted(authors), sorted(authors_expected))
def test_standard_metadata_in_default_metadata(self):
@@ -391,7 +415,6 @@ class TestArticlesGenerator(unittest.TestCase):
settings = get_settings(filenames={})
settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1)
- settings['CACHE_CONTENT'] = False # cache not needed for this logic tests
settings['ARTICLE_ORDER_BY'] = 'title'
generator = ArticlesGenerator(
@@ -420,7 +443,8 @@ class TestArticlesGenerator(unittest.TestCase):
'This is a super article !',
'This is a super article !',
'This is an article with category !',
- 'This is an article with multiple authors in lastname, firstname format!',
+ ('This is an article with multiple authors in lastname, '
+ 'firstname format!'),
'This is an article with multiple authors in list format!',
'This is an article with multiple authors!',
'This is an article with multiple authors!',
@@ -435,7 +459,6 @@ class TestArticlesGenerator(unittest.TestCase):
settings = get_settings(filenames={})
settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1)
- settings['CACHE_CONTENT'] = False # cache not needed for this logic tests
settings['ARTICLE_ORDER_BY'] = 'reversed-title'
generator = ArticlesGenerator(
@@ -561,7 +584,7 @@ class TestPageGenerator(unittest.TestCase):
are generated correctly on pages
"""
settings = get_settings(filenames={})
- settings['PAGE_PATHS'] = ['TestPages'] # relative to CUR_DIR
+ settings['PAGE_PATHS'] = ['TestPages'] # relative to CUR_DIR
settings['CACHE_PATH'] = self.temp_cache
settings['DEFAULT_DATE'] = (1970, 1, 1)
@@ -586,7 +609,6 @@ class TestTemplatePagesGenerator(unittest.TestCase):
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, str('C'))
-
def tearDown(self):
rmtree(self.temp_content)
rmtree(self.temp_output)
@@ -632,59 +654,67 @@ class TestStaticGenerator(unittest.TestCase):
def test_static_excludes(self):
"""Test that StaticGenerator respects STATIC_EXCLUDES.
"""
- settings = get_settings(STATIC_EXCLUDES=['subdir'],
- PATH=self.content_path, STATIC_PATHS=[''])
+ settings = get_settings(
+ STATIC_EXCLUDES=['subdir'],
+ PATH=self.content_path,
+ STATIC_PATHS=[''],
+ filenames={})
context = settings.copy()
- context['filenames'] = {}
- StaticGenerator(context=context, settings=settings,
+ StaticGenerator(
+ context=context, settings=settings,
path=settings['PATH'], output_path=None,
theme=settings['THEME']).generate_context()
staticnames = [os.path.basename(c.source_path)
- for c in context['staticfiles']]
+ for c in context['staticfiles']]
- self.assertNotIn('subdir_fake_image.jpg', staticnames,
+ self.assertNotIn(
+ 'subdir_fake_image.jpg', staticnames,
"StaticGenerator processed a file in a STATIC_EXCLUDES directory")
- self.assertIn('fake_image.jpg', staticnames,
+ self.assertIn(
+ 'fake_image.jpg', staticnames,
"StaticGenerator skipped a file that it should have included")
def test_static_exclude_sources(self):
"""Test that StaticGenerator respects STATIC_EXCLUDE_SOURCES.
"""
- # Test STATIC_EXCLUDE_SOURCES=True
- settings = get_settings(STATIC_EXCLUDE_SOURCES=True,
- PATH=self.content_path, PAGE_PATHS=[''], STATIC_PATHS=[''],
- CACHE_CONTENT=False)
+ settings = get_settings(
+ STATIC_EXCLUDE_SOURCES=True,
+ PATH=self.content_path,
+ PAGE_PATHS=[''],
+ STATIC_PATHS=[''],
+ CACHE_CONTENT=False,
+ filenames={})
context = settings.copy()
- context['filenames'] = {}
for generator_class in (PagesGenerator, StaticGenerator):
- generator_class(context=context, settings=settings,
+ generator_class(
+ context=context, settings=settings,
path=settings['PATH'], output_path=None,
theme=settings['THEME']).generate_context()
staticnames = [os.path.basename(c.source_path)
- for c in context['staticfiles']]
+ for c in context['staticfiles']]
- self.assertFalse(any(name.endswith(".md") for name in staticnames),
+ self.assertFalse(
+ any(name.endswith(".md") for name in staticnames),
"STATIC_EXCLUDE_SOURCES=True failed to exclude a markdown file")
- # Test STATIC_EXCLUDE_SOURCES=False
-
settings.update(STATIC_EXCLUDE_SOURCES=False)
context = settings.copy()
context['filenames'] = {}
for generator_class in (PagesGenerator, StaticGenerator):
- generator_class(context=context, settings=settings,
+ generator_class(
+ context=context, settings=settings,
path=settings['PATH'], output_path=None,
theme=settings['THEME']).generate_context()
staticnames = [os.path.basename(c.source_path)
- for c in context['staticfiles']]
+ for c in context['staticfiles']]
- self.assertTrue(any(name.endswith(".md") for name in staticnames),
+ self.assertTrue(
+ any(name.endswith(".md") for name in staticnames),
"STATIC_EXCLUDE_SOURCES=False failed to include a markdown file")
-
diff --git a/pelican/tests/test_importer.py b/pelican/tests/test_importer.py
index 4ace5ccc..6af59212 100644
--- a/pelican/tests/test_importer.py
+++ b/pelican/tests/test_importer.py
@@ -1,16 +1,19 @@
-# -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+# -*- coding: utf-8 -*-
+from __future__ import print_function, unicode_literals
+import locale
import os
import re
-import locale
from codecs import open
-from pelican.tools.pelican_import import wp2fields, fields2pelican, decode_wp_content, build_header, build_markdown_header, get_attachments, download_attachments
-from pelican.tests.support import (unittest, temporary_folder, mute,
- skipIfNoExecutable)
-from pelican.utils import slugify, path_to_file_url
+from pelican.tests.support import (mute, skipIfNoExecutable, temporary_folder,
+ unittest)
+from pelican.tools.pelican_import import (build_header, build_markdown_header,
+ decode_wp_content,
+ download_attachments, fields2pelican,
+ get_attachments, wp2fields)
+from pelican.utils import path_to_file_url, slugify
CUR_DIR = os.path.abspath(os.path.dirname(__file__))
WORDPRESS_XML_SAMPLE = os.path.join(CUR_DIR, 'content', 'wordpressexport.xml')
@@ -32,7 +35,6 @@ except ImportError:
LXML = False
-
@skipIfNoExecutable(['pandoc', '--version'])
@unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module')
class TestWordpressXmlImporter(unittest.TestCase):
@@ -48,17 +50,19 @@ class TestWordpressXmlImporter(unittest.TestCase):
def test_ignore_empty_posts(self):
self.assertTrue(self.posts)
- for title, content, fname, date, author, categ, tags, status, kind, format in self.posts:
- self.assertTrue(title.strip())
+ for (title, content, fname, date, author,
+ categ, tags, status, kind, format) in self.posts:
+ self.assertTrue(title.strip())
def test_recognise_page_kind(self):
""" Check that we recognise pages in wordpress, as opposed to posts """
self.assertTrue(self.posts)
# Collect (title, filename, kind) of non-empty posts recognised as page
pages_data = []
- for title, content, fname, date, author, categ, tags, status, kind, format in self.posts:
- if kind == 'page':
- pages_data.append((title, fname))
+ for (title, content, fname, date, author,
+ categ, tags, status, kind, format) in self.posts:
+ if kind == 'page':
+ pages_data.append((title, fname))
self.assertEqual(2, len(pages_data))
self.assertEqual(('Page', 'contact'), pages_data[0])
self.assertEqual(('Empty Page', 'empty'), pages_data[1])
@@ -67,7 +71,8 @@ class TestWordpressXmlImporter(unittest.TestCase):
silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Empty Page"), self.posts)
with temporary_folder() as temp:
- fname = list(silent_f2p(test_post, 'markdown', temp, dirpage=True))[0]
+ fname = list(silent_f2p(test_post, 'markdown',
+ temp, dirpage=True))[0]
self.assertTrue(fname.endswith('pages%sempty.md' % os.path.sep))
def test_dircat(self):
@@ -75,10 +80,11 @@ class TestWordpressXmlImporter(unittest.TestCase):
test_posts = []
for post in self.posts:
# check post kind
- if len(post[5]) > 0: # Has a category
+ if len(post[5]) > 0: # Has a category
test_posts.append(post)
with temporary_folder() as temp:
- fnames = list(silent_f2p(test_posts, 'markdown', temp, dircat=True))
+ fnames = list(silent_f2p(test_posts, 'markdown',
+ temp, dircat=True))
index = 0
for post in test_posts:
name = post[2]
@@ -92,25 +98,33 @@ class TestWordpressXmlImporter(unittest.TestCase):
def test_unless_custom_post_all_items_should_be_pages_or_posts(self):
self.assertTrue(self.posts)
pages_data = []
- for title, content, fname, date, author, categ, tags, status, kind, format in self.posts:
- if kind == 'page' or kind == 'article':
- pass
- else:
- pages_data.append((title, fname))
+ for (title, content, fname, date, author, categ,
+ tags, status, kind, format) in self.posts:
+ if kind == 'page' or kind == 'article':
+ pass
+ else:
+ pages_data.append((title, fname))
self.assertEqual(0, len(pages_data))
def test_recognise_custom_post_type(self):
self.assertTrue(self.custposts)
cust_data = []
- for title, content, fname, date, author, categ, tags, status, kind, format in self.custposts:
- if kind == 'article' or kind == 'page':
- pass
- else:
- cust_data.append((title, kind))
+ for (title, content, fname, date, author, categ,
+ tags, status, kind, format) in self.custposts:
+ if kind == 'article' or kind == 'page':
+ pass
+ else:
+ cust_data.append((title, kind))
self.assertEqual(3, len(cust_data))
- self.assertEqual(('A custom post in category 4', 'custom1'), cust_data[0])
- self.assertEqual(('A custom post in category 5', 'custom1'), cust_data[1])
- self.assertEqual(('A 2nd custom post type also in category 5', 'custom2'), cust_data[2])
+ self.assertEqual(
+ ('A custom post in category 4', 'custom1'),
+ cust_data[0])
+ self.assertEqual(
+ ('A custom post in category 5', 'custom1'),
+ cust_data[1])
+ self.assertEqual(
+ ('A 2nd custom post type also in category 5', 'custom2'),
+ cust_data[2])
def test_custom_posts_put_in_own_dir(self):
silent_f2p = mute(True)(fields2pelican)
@@ -122,7 +136,8 @@ class TestWordpressXmlImporter(unittest.TestCase):
else:
test_posts.append(post)
with temporary_folder() as temp:
- fnames = list(silent_f2p(test_posts, 'markdown', temp, wp_custpost = True))
+ fnames = list(silent_f2p(test_posts, 'markdown',
+ temp, wp_custpost=True))
index = 0
for post in test_posts:
name = post[2]
@@ -144,7 +159,7 @@ class TestWordpressXmlImporter(unittest.TestCase):
test_posts.append(post)
with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, 'markdown', temp,
- wp_custpost=True, dircat=True))
+ wp_custpost=True, dircat=True))
index = 0
for post in test_posts:
name = post[2]
@@ -157,7 +172,7 @@ class TestWordpressXmlImporter(unittest.TestCase):
index += 1
def test_wp_custpost_true_dirpage_false(self):
- #pages should only be put in their own directory when dirpage = True
+ # pages should only be put in their own directory when dirpage = True
silent_f2p = mute(True)(fields2pelican)
test_posts = []
for post in self.custposts:
@@ -166,7 +181,7 @@ class TestWordpressXmlImporter(unittest.TestCase):
test_posts.append(post)
with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, 'markdown', temp,
- wp_custpost=True, dirpage=False))
+ wp_custpost=True, dirpage=False))
index = 0
for post in test_posts:
name = post[2]
@@ -175,7 +190,6 @@ class TestWordpressXmlImporter(unittest.TestCase):
out_name = fnames[index]
self.assertFalse(out_name.endswith(filename))
-
def test_can_toggle_raw_html_code_parsing(self):
def r(f):
with open(f, encoding='utf-8') as infile:
@@ -184,10 +198,12 @@ class TestWordpressXmlImporter(unittest.TestCase):
with temporary_folder() as temp:
- rst_files = (r(f) for f in silent_f2p(self.posts, 'markdown', temp))
+ rst_files = (r(f) for f
+ in silent_f2p(self.posts, 'markdown', temp))
            self.assertTrue(any('<iframe' in rst for rst in rst_files))
diff --git a/pelican/tools/pelican_import.py b/pelican/tools/pelican_import.py
--- a/pelican/tools/pelican_import.py
+++ b/pelican/tools/pelican_import.py
@@ ... @@ def tumblr2fields(api_key, blogname):
-            content = '<p><a href="%s" rel="external nofollow">%s</a>: %s</p>\n%s' % (post.get('asking_name'), post.get('asking_url'), post.get('question'), post.get('answer'))
+            content = ('<p>'
+                       '<a href="%s" rel="external nofollow">%s</a>'
+                       ': %s'
+                       '</p>\n'
+                       ' %s' % (post.get('asking_name'),
+                                post.get('asking_url'),
+                                post.get('question'),
+                                post.get('answer')))
             content = content.rstrip() + '\n'
             kind = 'article'
@@ -438,25 +490,30 @@ def tumblr2fields(api_key, blogname):
         offset += len(posts)
         posts = get_tumblr_posts(api_key, blogname, offset)
+
 def feed2fields(file):
     """Read a feed and yield pelican fields"""
     import feedparser
     d = feedparser.parse(file)
     for entry in d.entries:
-        date = (time.strftime("%Y-%m-%d %H:%M", entry.updated_parsed)
-                if hasattr(entry, "updated_parsed") else None)
-        author = entry.author if hasattr(entry, "author") else None
-        tags = [e['term'] for e in entry.tags] if hasattr(entry, "tags") else None
+        date = (time.strftime('%Y-%m-%d %H:%M', entry.updated_parsed)
+                if hasattr(entry, 'updated_parsed') else None)
+        author = entry.author if hasattr(entry, 'author') else None
+        tags = ([e['term'] for e in entry.tags]
+                if hasattr(entry, 'tags') else None)
         slug = slugify(entry.title)
         kind = 'article'
-        yield (entry.title, entry.description, slug, date, author, [], tags, None,
-               kind, "html")
+        yield (entry.title, entry.description, slug, date,
+               author, [], tags, None, kind, 'html')
+
+
+def build_header(title, date, author, categories, tags, slug,
+                 status=None, attachments=None):
+    """Build a header from a list of fields"""
-def build_header(title, date, author, categories, tags, slug, status=None, attachments=None):
     from docutils.utils import column_width
-    """Build a header from a list of fields"""
     header = '%s\n%s\n' % (title, '#' * column_width(title))
     if date:
         header += ':date: %s\n' % date
@@ -475,8 +532,9 @@ def build_header(title, date, author, categories, tags, slug, status=None, attac
     header += '\n'
     return header
-def build_markdown_header(title, date, author, categories, tags, slug, status=None,
-                          attachments=None):
+
+def build_markdown_header(title, date, author, categories, tags,
+                          slug, status=None, attachments=None):
     """Build a header from a list of fields"""
     header = 'Title: %s\n' % title
     if date:
@@ -496,6 +554,7 @@ def build_markdown_header(title, date, author, categories, tags, slug, status=No
     header += '\n'
     return header
+
 def get_ext(out_markup, in_markup='html'):
     if in_markup == 'markdown' or out_markup == 'markdown':
         ext = '.md'
@@ -503,26 +562,27 @@ def get_ext(out_markup, in_markup='html'):
         ext = '.rst'
     return ext
+
 def get_out_filename(output_path, filename, ext, kind,
-        dirpage, dircat, categories, wp_custpost):
+                     dirpage, dircat, categories, wp_custpost):
     filename = os.path.basename(filename)
     # Enforce filename restrictions for various filesystems at once; see
     # http://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words
     # we do not need to filter words because an extension will be appended
-    filename = re.sub(r'[<>:"/\\|?*^% ]', '-', filename) # invalid chars
-    filename = filename.lstrip('.') # should not start with a dot
+    filename = re.sub(r'[<>:"/\\|?*^% ]', '-', filename)  # invalid chars
+    filename = filename.lstrip('.')  # should not start with a dot
     if not filename:
         filename = '_'
-    filename = filename[:249] # allow for 5 extra characters
+    filename = filename[:249]  # allow for 5 extra characters
-    out_filename = os.path.join(output_path, filename+ext)
+    out_filename = os.path.join(output_path, filename + ext)
     # option to put page posts in pages/ subdirectory
     if dirpage and kind == 'page':
         pages_dir = os.path.join(output_path, 'pages')
         if not os.path.isdir(pages_dir):
             os.mkdir(pages_dir)
-        out_filename = os.path.join(pages_dir, filename+ext)
+        out_filename = os.path.join(pages_dir, filename + ext)
     elif not dirpage and kind == 'page':
         pass
     # option to put wp custom post types in directories with post type
@@ -539,18 +599,19 @@ def get_out_filename(output_path, filename, ext, kind,
         else:
             catname = ''
         out_filename = os.path.join(output_path, typename,
-                catname, filename+ext)
+                                    catname, filename + ext)
         if not os.path.isdir(os.path.join(output_path, typename, catname)):
             os.makedirs(os.path.join(output_path, typename, catname))
     # option to put files in directories with categories names
     elif dircat and (len(categories) > 0):
         catname = slugify(categories[0])
-        out_filename = os.path.join(output_path, catname, filename+ext)
+        out_filename = os.path.join(output_path, catname, filename + ext)
         if not os.path.isdir(os.path.join(output_path, catname)):
             os.mkdir(os.path.join(output_path, catname))
     return out_filename
+
 def get_attachments(xml):
     """returns a dictionary of posts that have attachments with a list
     of the attachment_urls
@@ -566,7 +627,7 @@ def get_attachments(xml):
         if kind == 'attachment':
             attachments.append((item.find('post_parent').string,
-                item.find('attachment_url').string))
+                                item.find('attachment_url').string))
         else:
             filename = get_filename(filename, post_id)
             names[post_id] = filename
@@ -575,7 +636,7 @@ def get_attachments(xml):
         try:
             parent_name = names[parent]
         except KeyError:
-            #attachment's parent is not a valid post
+            # attachment's parent is not a valid post
             parent_name = None
         try:
@@ -585,6 +646,7 @@ def get_attachments(xml):
             attachedposts[parent_name].append(url)
     return attachedposts
+
 def download_attachments(output_path, urls):
     """Downloads WordPress attachments and returns a list of paths to
     attachments that can be associated with a post (relative path to output
@@ -592,8 +654,8 @@ def download_attachments(output_path, urls):
     locations = []
     for url in urls:
         path = urlparse(url).path
-        #teardown path and rebuild to negate any errors with
-        #os.path.join and leading /'s
+        # teardown path and rebuild to negate any errors with
+        # os.path.join and leading /'s
        path = path.split('/')
        filename = path.pop(-1)
        localpath = ''
@@ -608,12 +670,13 @@ def download_attachments(output_path, urls):
             urlretrieve(url, os.path.join(full_path, filename))
             locations.append(os.path.join(localpath, filename))
         except (URLError, IOError) as e:
-            #Python 2.7 throws an IOError rather Than URLError
+            # Python 2.7 throws an IOError rather Than URLError
             logger.warning("No file could be downloaded from %s\n%s",
                            url, e)
     return locations
-def fields2pelican(fields, out_markup, output_path,
+
+def fields2pelican(
+        fields, out_markup, output_path,
         dircat=False, strip_raw=False, disable_slugs=False,
         dirpage=False, filename_template=None, filter_author=None,
         wp_custpost=False, wp_attach=False, attachments=None):
@@ -634,24 +697,26 @@ def fields2pelican(fields, out_markup, output_path,
         ext = get_ext(out_markup, in_markup)
         if ext == '.md':
-            header = build_markdown_header(title, date, author, categories,
-                                           tags, slug, status, attached_files)
+            header = build_markdown_header(
+                title, date, author, categories, tags, slug,
+                status, attached_files)
         else:
-            out_markup = "rst"
+            out_markup = 'rst'
             header = build_header(title, date, author, categories,
-                    tags, slug, status, attached_files)
+                                  tags, slug, status, attached_files)
-        out_filename = get_out_filename(output_path, filename, ext,
-                kind, dirpage, dircat, categories, wp_custpost)
+        out_filename = get_out_filename(
+            output_path, filename, ext, kind, dirpage, dircat,
+            categories, wp_custpost)
         print(out_filename)
-        if in_markup in ("html", "wp-html"):
-            html_filename = os.path.join(output_path, filename+'.html')
+        if in_markup in ('html', 'wp-html'):
+            html_filename = os.path.join(output_path, filename + '.html')
             with open(html_filename, 'w', encoding='utf-8') as fp:
                 # Replace newlines with paragraphs wrapped with <p> so
# HTML is valid before conversion
- if in_markup == "wp-html":
+ if in_markup == 'wp-html':
new_content = decode_wp_content(content)
else:
paragraphs = content.splitlines()
@@ -660,79 +725,95 @@ def fields2pelican(fields, out_markup, output_path,
fp.write(new_content)
-
parse_raw = '--parse-raw' if not strip_raw else ''
cmd = ('pandoc --normalize {0} --from=html'
- ' --to={1} -o "{2}" "{3}"').format(
- parse_raw, out_markup, out_filename, html_filename)
+ ' --to={1} -o "{2}" "{3}"')
+ cmd = cmd.format(parse_raw, out_markup,
+ out_filename, html_filename)
try:
rc = subprocess.call(cmd, shell=True)
if rc < 0:
- error = "Child was terminated by signal %d" % -rc
+ error = 'Child was terminated by signal %d' % -rc
exit(error)
elif rc > 0:
- error = "Please, check your Pandoc installation."
+ error = 'Please, check your Pandoc installation.'
exit(error)
except OSError as e:
- error = "Pandoc execution failed: %s" % e
+ error = 'Pandoc execution failed: %s' % e
exit(error)
os.remove(html_filename)
with open(out_filename, 'r', encoding='utf-8') as fs:
content = fs.read()
- if out_markup == "markdown":
-            # In markdown, to insert a <br />, end a line with two or more spaces & then a end-of-line
- content = content.replace("\\\n ", " \n")
- content = content.replace("\\\n", " \n")
+ if out_markup == 'markdown':
+            # In markdown, to insert a <br />, end a line with two
+ # or more spaces & then a end-of-line
+ content = content.replace('\\\n ', ' \n')
+ content = content.replace('\\\n', ' \n')
with open(out_filename, 'w', encoding='utf-8') as fs:
fs.write(header + content)
if wp_attach and attachments and None in attachments:
print("downloading attachments that don't have a parent post")
urls = attachments[None]
- orphan_galleries = download_attachments(output_path, urls)
+ download_attachments(output_path, urls)
+
def main():
parser = argparse.ArgumentParser(
- description="Transform feed, WordPress, Tumblr, Dotclear, or Posterous "
- "files into reST (rst) or Markdown (md) files. Be sure to "
- "have pandoc installed.",
+ description="Transform feed, WordPress, Tumblr, Dotclear, or "
+ "Posterous files into reST (rst) or Markdown (md) files. "
+ "Be sure to have pandoc installed.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument(dest='input', help='The input file to read')
- parser.add_argument('--wpfile', action='store_true', dest='wpfile',
+ parser.add_argument(
+ dest='input', help='The input file to read')
+ parser.add_argument(
+ '--wpfile', action='store_true', dest='wpfile',
help='Wordpress XML export')
- parser.add_argument('--dotclear', action='store_true', dest='dotclear',
+ parser.add_argument(
+ '--dotclear', action='store_true', dest='dotclear',
help='Dotclear export')
- parser.add_argument('--posterous', action='store_true', dest='posterous',
+ parser.add_argument(
+ '--posterous', action='store_true', dest='posterous',
help='Posterous export')
- parser.add_argument('--tumblr', action='store_true', dest='tumblr',
+ parser.add_argument(
+ '--tumblr', action='store_true', dest='tumblr',
help='Tumblr export')
- parser.add_argument('--feed', action='store_true', dest='feed',
+ parser.add_argument(
+ '--feed', action='store_true', dest='feed',
help='Feed to parse')
- parser.add_argument('-o', '--output', dest='output', default='output',
+ parser.add_argument(
+ '-o', '--output', dest='output', default='output',
help='Output path')
- parser.add_argument('-m', '--markup', dest='markup', default='rst',
+ parser.add_argument(
+ '-m', '--markup', dest='markup', default='rst',
help='Output markup format (supports rst & markdown)')
- parser.add_argument('--dir-cat', action='store_true', dest='dircat',
+ parser.add_argument(
+ '--dir-cat', action='store_true', dest='dircat',
help='Put files in directories with categories name')
- parser.add_argument('--dir-page', action='store_true', dest='dirpage',
+ parser.add_argument(
+ '--dir-page', action='store_true', dest='dirpage',
help=('Put files recognised as pages in "pages/" sub-directory'
' (wordpress import only)'))
- parser.add_argument('--filter-author', dest='author',
+ parser.add_argument(
+ '--filter-author', dest='author',
help='Import only post from the specified author')
- parser.add_argument('--strip-raw', action='store_true', dest='strip_raw',
+ parser.add_argument(
+ '--strip-raw', action='store_true', dest='strip_raw',
help="Strip raw HTML code that can't be converted to "
"markup such as flash embeds or iframes (wordpress import only)")
- parser.add_argument('--wp-custpost', action='store_true',
+ parser.add_argument(
+ '--wp-custpost', action='store_true',
dest='wp_custpost',
help='Put wordpress custom post types in directories. If used with '
'--dir-cat option directories will be created as '
'/post_type/category/ (wordpress import only)')
- parser.add_argument('--wp-attach', action='store_true', dest='wp_attach',
+ parser.add_argument(
+ '--wp-attach', action='store_true', dest='wp_attach',
help='(wordpress import only) Download files uploaded to wordpress as '
'attachments. Files will be added to posts as a list in the post '
'header. All files will be downloaded, even if '
@@ -740,16 +821,20 @@ def main():
'with their original path inside the output directory. '
'e.g. output/wp-uploads/date/postname/file.jpg '
'-- Requires an internet connection --')
- parser.add_argument('--disable-slugs', action='store_true',
+ parser.add_argument(
+ '--disable-slugs', action='store_true',
dest='disable_slugs',
help='Disable storing slugs from imported posts within output. '
'With this disabled, your Pelican URLs may not be consistent '
'with your original posts.')
- parser.add_argument('-e', '--email', dest='email',
+ parser.add_argument(
+ '-e', '--email', dest='email',
help="Email address (posterous import only)")
- parser.add_argument('-p', '--password', dest='password',
+ parser.add_argument(
+ '-p', '--password', dest='password',
help="Password (posterous import only)")
- parser.add_argument('-b', '--blogname', dest='blogname',
+ parser.add_argument(
+ '-b', '--blogname', dest='blogname',
help="Blog name (Tumblr import only)")
args = parser.parse_args()
@@ -766,18 +851,20 @@ def main():
elif args.feed:
input_type = 'feed'
else:
- error = "You must provide either --wpfile, --dotclear, --posterous, --tumblr or --feed options"
+ error = ('You must provide either --wpfile, --dotclear, '
+ '--posterous, --tumblr or --feed options')
exit(error)
if not os.path.exists(args.output):
try:
os.mkdir(args.output)
except OSError:
- error = "Unable to create the output folder: " + args.output
+ error = 'Unable to create the output folder: ' + args.output
exit(error)
if args.wp_attach and input_type != 'wordpress':
- error = "You must be importing a wordpress xml to use the --wp-attach option"
+ error = ('You must be importing a wordpress xml '
+ 'to use the --wp-attach option')
exit(error)
if input_type == 'wordpress':
@@ -796,14 +883,14 @@ def main():
else:
attachments = None
- init() # init logging
-
+ # init logging
+ init()
fields2pelican(fields, args.markup, args.output,
dircat=args.dircat or False,
dirpage=args.dirpage or False,
strip_raw=args.strip_raw or False,
disable_slugs=args.disable_slugs or False,
filter_author=args.author,
- wp_custpost = args.wp_custpost or False,
- wp_attach = args.wp_attach or False,
- attachments = attachments or None)
+ wp_custpost=args.wp_custpost or False,
+ wp_attach=args.wp_attach or False,
+ attachments=attachments or None)
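
As a usage sketch of the CLI assembled above (the export file name here is hypothetical; pelican-import is the console script that wraps this main()):

    $ pelican-import --wpfile -m markdown --dir-cat -o content export.xml
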
diff --git a/pelican/tools/pelican_quickstart.py b/pelican/tools/pelican_quickstart.py
index 58da4649..e6ccf2a8 100755
--- a/pelican/tools/pelican_quickstart.py
+++ b/pelican/tools/pelican_quickstart.py
@@ -1,18 +1,20 @@
#!/usr/bin/env python
-
# -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
-import six
+from __future__ import print_function, unicode_literals
+import argparse
+import codecs
import os
import string
-import argparse
import sys
-import codecs
+
import pytz
+import six
+
from pelican import __version__
+
_TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"templates")
@@ -44,9 +46,10 @@ CONF = {
'timezone': 'Europe/Paris'
}
-#url for list of valid timezones
+# url for list of valid timezones
_TZ_URL = 'http://en.wikipedia.org/wiki/List_of_tz_database_time_zones'
+
def _input_compat(prompt):
if six.PY3:
r = input(prompt)
@@ -59,6 +62,7 @@ if six.PY3:
else:
str_compat = unicode
+
# Create a 'marked' default path, to determine if someone has supplied
# a path on the command-line.
class _DEFAULT_PATH_TYPE(str_compat):
@@ -66,6 +70,7 @@ class _DEFAULT_PATH_TYPE(str_compat):
_DEFAULT_PATH = _DEFAULT_PATH_TYPE(os.curdir)
+
def decoding_strings(f):
def wrapper(*args, **kwargs):
out = f(*args, **kwargs)
@@ -164,7 +169,8 @@ def ask(question, answer=str_compat, default=None, l=None):
print('You must enter an integer')
return r
else:
- raise NotImplemented('Argument `answer` must be str_compat, bool, or integer')
+ raise NotImplemented(
+ 'Argument `answer` must be str_compat, bool, or integer')
def ask_timezone(question, default, tzurl):
@@ -177,7 +183,8 @@ def ask_timezone(question, default, tzurl):
r = pytz.all_timezones[lower_tz.index(r)]
break
else:
- print('Please enter a valid time zone:\n (check [{0}])'.format(tzurl))
+ print('Please enter a valid time zone:\n'
+ ' (check [{0}])'.format(tzurl))
return r
@@ -186,13 +193,13 @@ def main():
description="A kickstarter for Pelican",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-p', '--path', default=_DEFAULT_PATH,
- help="The path to generate the blog into")
+ help="The path to generate the blog into")
parser.add_argument('-t', '--title', metavar="title",
- help='Set the title of the website')
+ help='Set the title of the website')
parser.add_argument('-a', '--author', metavar="author",
- help='Set the author name of the website')
+ help='Set the author name of the website')
parser.add_argument('-l', '--lang', metavar="lang",
- help='Set the default web site language')
+ help='Set the default web site language')
args = parser.parse_args()
@@ -214,50 +221,94 @@ needed by Pelican.
'Will save to:\n%s\n' % CONF['basedir'])
else:
CONF['basedir'] = os.path.abspath(os.path.expanduser(
- ask('Where do you want to create your new web site?', answer=str_compat, default=args.path)))
+ ask('Where do you want to create your new web site?',
+ answer=str_compat, default=args.path)))
- CONF['sitename'] = ask('What will be the title of this web site?', answer=str_compat, default=args.title)
- CONF['author'] = ask('Who will be the author of this web site?', answer=str_compat, default=args.author)
- CONF['lang'] = ask('What will be the default language of this web site?', str_compat, args.lang or CONF['lang'], 2)
+ CONF['sitename'] = ask('What will be the title of this web site?',
+ answer=str_compat, default=args.title)
+ CONF['author'] = ask('Who will be the author of this web site?',
+ answer=str_compat, default=args.author)
+ CONF['lang'] = ask('What will be the default language of this web site?',
+ str_compat, args.lang or CONF['lang'], 2)
- if ask('Do you want to specify a URL prefix? e.g., http://example.com ', answer=bool, default=True):
- CONF['siteurl'] = ask('What is your URL prefix? (see above example; no trailing slash)', str_compat, CONF['siteurl'])
+ if ask('Do you want to specify a URL prefix? e.g., http://example.com ',
+ answer=bool, default=True):
+ CONF['siteurl'] = ask('What is your URL prefix? (see '
+ 'above example; no trailing slash)',
+ str_compat, CONF['siteurl'])
- CONF['with_pagination'] = ask('Do you want to enable article pagination?', bool, bool(CONF['default_pagination']))
+ CONF['with_pagination'] = ask('Do you want to enable article pagination?',
+ bool, bool(CONF['default_pagination']))
if CONF['with_pagination']:
- CONF['default_pagination'] = ask('How many articles per page do you want?', int, CONF['default_pagination'])
+ CONF['default_pagination'] = ask('How many articles per page '
+ 'do you want?',
+ int, CONF['default_pagination'])
else:
CONF['default_pagination'] = False
- CONF['timezone'] = ask_timezone('What is your time zone?', CONF['timezone'], _TZ_URL)
+ CONF['timezone'] = ask_timezone('What is your time zone?',
+ CONF['timezone'], _TZ_URL)
- automation = ask('Do you want to generate a Fabfile/Makefile to automate generation and publishing?', bool, True)
- develop = ask('Do you want an auto-reload & simpleHTTP script to assist with theme and site development?', bool, True)
+ automation = ask('Do you want to generate a Fabfile/Makefile '
+ 'to automate generation and publishing?', bool, True)
+ develop = ask('Do you want an auto-reload & simpleHTTP script '
+ 'to assist with theme and site development?', bool, True)
if automation:
- if ask('Do you want to upload your website using FTP?', answer=bool, default=False):
- CONF['ftp_host'] = ask('What is the hostname of your FTP server?', str_compat, CONF['ftp_host'])
- CONF['ftp_user'] = ask('What is your username on that server?', str_compat, CONF['ftp_user'])
- CONF['ftp_target_dir'] = ask('Where do you want to put your web site on that server?', str_compat, CONF['ftp_target_dir'])
- if ask('Do you want to upload your website using SSH?', answer=bool, default=False):
- CONF['ssh_host'] = ask('What is the hostname of your SSH server?', str_compat, CONF['ssh_host'])
- CONF['ssh_port'] = ask('What is the port of your SSH server?', int, CONF['ssh_port'])
- CONF['ssh_user'] = ask('What is your username on that server?', str_compat, CONF['ssh_user'])
- CONF['ssh_target_dir'] = ask('Where do you want to put your web site on that server?', str_compat, CONF['ssh_target_dir'])
- if ask('Do you want to upload your website using Dropbox?', answer=bool, default=False):
- CONF['dropbox_dir'] = ask('Where is your Dropbox directory?', str_compat, CONF['dropbox_dir'])
- if ask('Do you want to upload your website using S3?', answer=bool, default=False):
- CONF['s3_bucket'] = ask('What is the name of your S3 bucket?', str_compat, CONF['s3_bucket'])
- if ask('Do you want to upload your website using Rackspace Cloud Files?', answer=bool, default=False):
- CONF['cloudfiles_username'] = ask('What is your Rackspace Cloud username?', str_compat, CONF['cloudfiles_username'])
- CONF['cloudfiles_api_key'] = ask('What is your Rackspace Cloud API key?', str_compat, CONF['cloudfiles_api_key'])
- CONF['cloudfiles_container'] = ask('What is the name of your Cloud Files container?', str_compat, CONF['cloudfiles_container'])
- if ask('Do you want to upload your website using GitHub Pages?', answer=bool, default=False):
- if ask('Is this your personal page (username.github.io)?', answer=bool, default=False):
- CONF['github_pages_branch'] = _GITHUB_PAGES_BRANCHES['personal']
+ if ask('Do you want to upload your website using FTP?',
+ answer=bool, default=False):
+ CONF['ftp_host'] = ask('What is the hostname of your FTP server?',
+ str_compat, CONF['ftp_host'])
+ CONF['ftp_user'] = ask('What is your username on that server?',
+ str_compat, CONF['ftp_user'])
+ CONF['ftp_target_dir'] = ask('Where do you want to put your '
+ 'web site on that server?',
+ str_compat, CONF['ftp_target_dir'])
+ if ask('Do you want to upload your website using SSH?',
+ answer=bool, default=False):
+ CONF['ssh_host'] = ask('What is the hostname of your SSH server?',
+ str_compat, CONF['ssh_host'])
+ CONF['ssh_port'] = ask('What is the port of your SSH server?',
+ int, CONF['ssh_port'])
+ CONF['ssh_user'] = ask('What is your username on that server?',
+ str_compat, CONF['ssh_user'])
+ CONF['ssh_target_dir'] = ask('Where do you want to put your '
+ 'web site on that server?',
+ str_compat, CONF['ssh_target_dir'])
+
+ if ask('Do you want to upload your website using Dropbox?',
+ answer=bool, default=False):
+ CONF['dropbox_dir'] = ask('Where is your Dropbox directory?',
+ str_compat, CONF['dropbox_dir'])
+
+ if ask('Do you want to upload your website using S3?',
+ answer=bool, default=False):
+ CONF['s3_bucket'] = ask('What is the name of your S3 bucket?',
+ str_compat, CONF['s3_bucket'])
+
+ if ask('Do you want to upload your website using '
+ 'Rackspace Cloud Files?', answer=bool, default=False):
+ CONF['cloudfiles_username'] = ask('What is your Rackspace '
+ 'Cloud username?', str_compat,
+ CONF['cloudfiles_username'])
+ CONF['cloudfiles_api_key'] = ask('What is your Rackspace '
+ 'Cloud API key?', str_compat,
+ CONF['cloudfiles_api_key'])
+ CONF['cloudfiles_container'] = ask('What is the name of your '
+ 'Cloud Files container?',
+ str_compat,
+ CONF['cloudfiles_container'])
+
+ if ask('Do you want to upload your website using GitHub Pages?',
+ answer=bool, default=False):
+ if ask('Is this your personal page (username.github.io)?',
+ answer=bool, default=False):
+ CONF['github_pages_branch'] = \
+ _GITHUB_PAGES_BRANCHES['personal']
else:
- CONF['github_pages_branch'] = _GITHUB_PAGES_BRANCHES['project']
+ CONF['github_pages_branch'] = \
+ _GITHUB_PAGES_BRANCHES['project']
try:
os.makedirs(os.path.join(CONF['basedir'], 'content'))
@@ -270,7 +321,8 @@ needed by Pelican.
print('Error: {0}'.format(e))
try:
- with codecs.open(os.path.join(CONF['basedir'], 'pelicanconf.py'), 'w', 'utf-8') as fd:
+ with codecs.open(os.path.join(CONF['basedir'], 'pelicanconf.py'),
+ 'w', 'utf-8') as fd:
conf_python = dict()
for key, value in CONF.items():
conf_python[key] = repr(value)
@@ -283,7 +335,8 @@ needed by Pelican.
print('Error: {0}'.format(e))
try:
- with codecs.open(os.path.join(CONF['basedir'], 'publishconf.py'), 'w', 'utf-8') as fd:
+ with codecs.open(os.path.join(CONF['basedir'], 'publishconf.py'),
+ 'w', 'utf-8') as fd:
for line in get_template('publishconf.py'):
template = string.Template(line)
fd.write(template.safe_substitute(CONF))
@@ -293,7 +346,8 @@ needed by Pelican.
if automation:
try:
- with codecs.open(os.path.join(CONF['basedir'], 'fabfile.py'), 'w', 'utf-8') as fd:
+ with codecs.open(os.path.join(CONF['basedir'], 'fabfile.py'),
+ 'w', 'utf-8') as fd:
for line in get_template('fabfile.py'):
template = string.Template(line)
fd.write(template.safe_substitute(CONF))
@@ -301,7 +355,8 @@ needed by Pelican.
except OSError as e:
print('Error: {0}'.format(e))
try:
- with codecs.open(os.path.join(CONF['basedir'], 'Makefile'), 'w', 'utf-8') as fd:
+ with codecs.open(os.path.join(CONF['basedir'], 'Makefile'),
+ 'w', 'utf-8') as fd:
mkfile_template_name = 'Makefile'
py_v = 'PY?=python'
if six.PY3:
@@ -323,7 +378,9 @@ needed by Pelican.
value = '"' + value.replace('"', '\\"') + '"'
conf_shell[key] = value
try:
- with codecs.open(os.path.join(CONF['basedir'], 'develop_server.sh'), 'w', 'utf-8') as fd:
+ with codecs.open(os.path.join(CONF['basedir'],
+ 'develop_server.sh'),
+ 'w', 'utf-8') as fd:
lines = list(get_template('develop_server.sh'))
py_v = 'PY=${PY:-python}\n'
if six.PY3:
@@ -333,7 +390,10 @@ needed by Pelican.
template = string.Template(line)
fd.write(template.safe_substitute(conf_shell))
fd.close()
- os.chmod((os.path.join(CONF['basedir'], 'develop_server.sh')), 493) # mode 0o755
+
+ # mode 0o755
+ os.chmod((os.path.join(CONF['basedir'],
+ 'develop_server.sh')), 493)
except OSError as e:
print('Error: {0}'.format(e))
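
The prompts reorganised above can also be pre-seeded from the command line, which keeps the interactive session short. A sketch, with made-up title, author, and path:

    $ pelican-quickstart -p ./myblog -t 'My Blog' -a 'Jane Doe' -l en
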
diff --git a/pelican/tools/pelican_themes.py b/pelican/tools/pelican_themes.py
index 8d71535d..e4bcb7c9 100755
--- a/pelican/tools/pelican_themes.py
+++ b/pelican/tools/pelican_themes.py
@@ -1,33 +1,12 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
-
-import six
+from __future__ import print_function, unicode_literals
import argparse
import os
import shutil
import sys
-try:
- import pelican
-except:
- err('Cannot import pelican.\nYou must install Pelican in order to run this script.', -1)
-
-
-global _THEMES_PATH
-_THEMES_PATH = os.path.join(
- os.path.dirname(
- os.path.abspath(
- pelican.__file__
- )
- ),
- 'themes'
-)
-
-__version__ = '0.2'
-_BUILTIN_THEMES = ['simple', 'notmyidea']
-
def err(msg, die=None):
"""Print an error message and exits if an exit code is given"""
@@ -35,43 +14,71 @@ def err(msg, die=None):
if die:
sys.exit((die if type(die) is int else 1))
+try:
+ import pelican
+except:
+ err('Cannot import pelican.\nYou must '
+ 'install Pelican in order to run this script.',
+ -1)
+
+
+global _THEMES_PATH
+_THEMES_PATH = os.path.join(
+ os.path.dirname(
+ os.path.abspath(pelican.__file__)
+ ),
+ 'themes'
+)
+
+__version__ = '0.2'
+_BUILTIN_THEMES = ['simple', 'notmyidea']
+
def main():
"""Main function"""
- parser = argparse.ArgumentParser(description="""Install themes for Pelican""")
+ parser = argparse.ArgumentParser(
+ description="""Install themes for Pelican""")
- excl= parser.add_mutually_exclusive_group()
- excl.add_argument('-l', '--list', dest='action', action="store_const", const='list',
+ excl = parser.add_mutually_exclusive_group()
+ excl.add_argument(
+ '-l', '--list', dest='action', action="store_const", const='list',
help="Show the themes already installed and exit")
- excl.add_argument('-p', '--path', dest='action', action="store_const", const='path',
+ excl.add_argument(
+ '-p', '--path', dest='action', action="store_const", const='path',
help="Show the themes path and exit")
- excl.add_argument('-V', '--version', action='version', version='pelican-themes v{0}'.format(__version__),
+ excl.add_argument(
+ '-V', '--version', action='version',
+ version='pelican-themes v{0}'.format(__version__),
help='Print the version of this script')
-
- parser.add_argument('-i', '--install', dest='to_install', nargs='+', metavar="theme path",
+ parser.add_argument(
+ '-i', '--install', dest='to_install', nargs='+', metavar="theme path",
help='The themes to install')
- parser.add_argument('-r', '--remove', dest='to_remove', nargs='+', metavar="theme name",
+ parser.add_argument(
+ '-r', '--remove', dest='to_remove', nargs='+', metavar="theme name",
help='The themes to remove')
- parser.add_argument('-U', '--upgrade', dest='to_upgrade', nargs='+',
- metavar="theme path", help='The themes to upgrade')
- parser.add_argument('-s', '--symlink', dest='to_symlink', nargs='+', metavar="theme path",
- help="Same as `--install', but create a symbolic link instead of copying the theme. Useful for theme development")
- parser.add_argument('-c', '--clean', dest='clean', action="store_true",
+ parser.add_argument(
+ '-U', '--upgrade', dest='to_upgrade', nargs='+',
+ metavar="theme path", help='The themes to upgrade')
+ parser.add_argument(
+ '-s', '--symlink', dest='to_symlink', nargs='+', metavar="theme path",
+ help="Same as `--install', but create a symbolic link instead of "
+ "copying the theme. Useful for theme development")
+ parser.add_argument(
+ '-c', '--clean', dest='clean', action="store_true",
help="Remove the broken symbolic links of the theme path")
-
- parser.add_argument('-v', '--verbose', dest='verbose', action="store_true",
+ parser.add_argument(
+ '-v', '--verbose', dest='verbose',
+ action="store_true",
help="Verbose output")
-
args = parser.parse_args()
-
+
to_install = args.to_install or args.to_upgrade
to_sym = args.to_symlink or args.clean
-
if args.action:
if args.action is 'list':
list_themes(args.verbose)
@@ -95,7 +102,7 @@ def main():
if args.to_upgrade:
if args.verbose:
print('Upgrading themes...')
-
+
for i in args.to_upgrade:
install(i, v=args.verbose, u=True)
@@ -144,11 +151,13 @@ def list_themes(v=False):
def remove(theme_name, v=False):
"""Removes a theme"""
- theme_name = theme_name.replace('/','')
+ theme_name = theme_name.replace('/', '')
target = os.path.join(_THEMES_PATH, theme_name)
if theme_name in _BUILTIN_THEMES:
- err(theme_name + ' is a builtin theme.\nYou cannot remove a builtin theme with this script, remove it by hand if you want.')
+ err(theme_name + ' is a builtin theme.\n'
+ 'You cannot remove a builtin theme with this script, '
+ 'remove it by hand if you want.')
elif os.path.islink(target):
if v:
print('Removing link `' + target + "'")
@@ -180,7 +189,8 @@ def install(path, v=False, u=False):
install(path, v)
else:
if v:
- print("Copying `{p}' to `{t}' ...".format(p=path, t=theme_path))
+ print("Copying '{p}' to '{t}' ...".format(p=path,
+ t=theme_path))
try:
shutil.copytree(path, theme_path)
@@ -189,14 +199,18 @@ def install(path, v=False, u=False):
for root, dirs, files in os.walk(theme_path):
for d in dirs:
dname = os.path.join(root, d)
- os.chmod(dname, 493) # 0o755
+ os.chmod(dname, 493) # 0o755
for f in files:
fname = os.path.join(root, f)
- os.chmod(fname, 420) # 0o644
+ os.chmod(fname, 420) # 0o644
except OSError as e:
- err("Cannot change permissions of files or directory in `{r}':\n{e}".format(r=theme_path, e=str(e)), die=False)
+ err("Cannot change permissions of files "
+ "or directory in `{r}':\n{e}".format(r=theme_path,
+ e=str(e)),
+ die=False)
except Exception as e:
- err("Cannot copy `{p}' to `{t}':\n{e}".format(p=path, t=theme_path, e=str(e)))
+ err("Cannot copy `{p}' to `{t}':\n{e}".format(
+ p=path, t=theme_path, e=str(e)))
def symlink(path, v=False):
@@ -212,11 +226,13 @@ def symlink(path, v=False):
err(path + ' : already exists')
else:
if v:
- print("Linking `{p}' to `{t}' ...".format(p=path, t=theme_path))
+ print("Linking `{p}' to `{t}' ...".format(
+ p=path, t=theme_path))
try:
os.symlink(path, theme_path)
except Exception as e:
- err("Cannot link `{p}' to `{t}':\n{e}".format(p=path, t=theme_path, e=str(e)))
+ err("Cannot link `{p}' to `{t}':\n{e}".format(
+ p=path, t=theme_path, e=str(e)))
def is_broken_link(path):
@@ -227,7 +243,7 @@ def is_broken_link(path):
def clean(v=False):
"""Removes the broken symbolic links"""
- c=0
+ c = 0
for path in os.listdir(_THEMES_PATH):
path = os.path.join(_THEMES_PATH, path)
if os.path.islink(path):
@@ -236,9 +252,9 @@ def clean(v=False):
print('Removing {0}'.format(path))
try:
os.remove(path)
- except OSError as e:
+ except OSError:
print('Error: cannot remove {0}'.format(path))
else:
- c+=1
+ c += 1
print("\nRemoved {0} broken links".format(c))
diff --git a/pelican/urlwrappers.py b/pelican/urlwrappers.py
index 65dee23b..bf1199a8 100644
--- a/pelican/urlwrappers.py
+++ b/pelican/urlwrappers.py
@@ -4,9 +4,10 @@ from __future__ import unicode_literals
import functools
import logging
import os
+
import six
-from pelican.utils import (slugify, python_2_unicode_compatible)
+from pelican.utils import python_2_unicode_compatible, slugify
logger = logging.getLogger(__name__)
diff --git a/pelican/utils.py b/pelican/utils.py
index 43dca212..786a9425 100644
--- a/pelican/utils.py
+++ b/pelican/utils.py
@@ -1,29 +1,30 @@
# -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
-import six
+from __future__ import print_function, unicode_literals
import codecs
+import datetime
import errno
import fnmatch
import locale
import logging
import os
-import pytz
import re
import shutil
import sys
import traceback
-import pickle
-import datetime
-
from collections import Hashable
from contextlib import contextmanager
-import dateutil.parser
from functools import partial
from itertools import groupby
-from jinja2 import Markup
from operator import attrgetter
-from posixpath import join as posix_join
+
+import dateutil.parser
+
+from jinja2 import Markup
+
+import pytz
+
+import six
from six.moves.html_parser import HTMLParser
logger = logging.getLogger(__name__)
@@ -43,9 +44,9 @@ def strftime(date, date_format):
formatting them with the date, (if necessary) decoding the output and
replacing formatted output back.
'''
-
+ def strip_zeros(x):
+ return x.lstrip('0') or '0'
c89_directives = 'aAbBcdfHIjmMpSUwWxXyYzZ%'
- strip_zeros = lambda x: x.lstrip('0') or '0'
# grab candidate format options
format_options = '%[-]?.'
@@ -200,8 +201,8 @@ def deprecated_attribute(old, new, since=None, remove=None, doc=None):
' and will be removed by version {}'.format(version))
message.append('. Use {} instead.'.format(new))
logger.warning(''.join(message))
- logger.debug(''.join(
- six.text_type(x) for x in traceback.format_stack()))
+ logger.debug(''.join(six.text_type(x) for x
+ in traceback.format_stack()))
def fget(self):
_warn()
@@ -224,7 +225,7 @@ def get_date(string):
"""
string = re.sub(' +', ' ', string)
default = SafeDatetime.now().replace(hour=0, minute=0,
- second=0, microsecond=0)
+ second=0, microsecond=0)
try:
return dateutil.parser.parse(string, default=default)
except (TypeError, ValueError):
@@ -319,12 +320,12 @@ def copy(source, destination, ignores=None):
for src_dir, subdirs, others in os.walk(source_):
dst_dir = os.path.join(destination_,
- os.path.relpath(src_dir, source_))
+ os.path.relpath(src_dir, source_))
subdirs[:] = (s for s in subdirs if not any(fnmatch.fnmatch(s, i)
for i in ignores))
- others[:] = (o for o in others if not any(fnmatch.fnmatch(o, i)
- for i in ignores))
+ others[:] = (o for o in others if not any(fnmatch.fnmatch(o, i)
+ for i in ignores))
if not os.path.isdir(dst_dir):
logger.info('Creating directory %s', dst_dir)
@@ -338,9 +339,11 @@ def copy(source, destination, ignores=None):
logger.info('Copying %s to %s', src_path, dst_path)
shutil.copy2(src_path, dst_path)
else:
- logger.warning('Skipped copy %s (not a file or directory) to %s',
+ logger.warning('Skipped copy %s (not a file or '
+ 'directory) to %s',
src_path, dst_path)
+
def clean_output_dir(path, retention):
"""Remove all files from output directory except those in retention list"""
@@ -366,8 +369,8 @@ def clean_output_dir(path, retention):
shutil.rmtree(file)
logger.debug("Deleted directory %s", file)
except Exception as e:
- logger.error("Unable to delete directory %s; %s",
- file, e)
+ logger.error("Unable to delete directory %s; %s",
+ file, e)
elif os.path.isfile(file) or os.path.islink(file):
try:
os.remove(file)
@@ -507,12 +510,12 @@ def process_translations(content_list, order_by=None):
for slug, items in grouped_by_slugs:
items = list(items)
- # items with `translation` metadata will be used as translations…
+ # items with `translation` metadata will be used as translations...
default_lang_items = list(filter(
- lambda i: i.metadata.get('translation', 'false').lower()
- == 'false',
- items))
- # …unless all items with that slug are translations
+ lambda i:
+ i.metadata.get('translation', 'false').lower() == 'false',
+ items))
+ # ...unless all items with that slug are translations
if not default_lang_items:
default_lang_items = items
@@ -522,13 +525,14 @@ def process_translations(content_list, order_by=None):
len_ = len(lang_items)
if len_ > 1:
logger.warning('There are %s variants of "%s" with lang %s',
- len_, slug, lang)
+ len_, slug, lang)
for x in lang_items:
logger.warning('\t%s', x.source_path)
# find items with default language
- default_lang_items = list(filter(attrgetter('in_default_lang'),
- default_lang_items))
+ default_lang_items = list(filter(
+ attrgetter('in_default_lang'),
+ default_lang_items))
# if there is no article with default language, take an other one
if not default_lang_items:
@@ -536,10 +540,9 @@ def process_translations(content_list, order_by=None):
if not slug:
logger.warning(
- 'empty slug for %s. '
- 'You can fix this by adding a title or a slug to your '
- 'content',
- default_lang_items[0].source_path)
+ 'Empty slug for %s. You can fix this by '
+ 'adding a title or a slug to your content',
+ default_lang_items[0].source_path)
index.extend(default_lang_items)
translations.extend([x for x in items if x not in default_lang_items])
for a in items:
@@ -567,10 +570,12 @@ def process_translations(content_list, order_by=None):
index.sort(key=attrgetter(order_by),
reverse=order_reversed)
except AttributeError:
- logger.warning('There is no "%s" attribute in the item '
+ logger.warning(
+ 'There is no "%s" attribute in the item '
'metadata. Defaulting to slug order.', order_by)
else:
- logger.warning('Invalid *_ORDER_BY setting (%s).'
+ logger.warning(
+ 'Invalid *_ORDER_BY setting (%s).'
'Valid options are strings and functions.', order_by)
return index, translations
@@ -589,12 +594,12 @@ def folder_watcher(path, extensions, ignores=[]):
dirs[:] = [x for x in dirs if not x.startswith(os.curdir)]
for f in files:
- if (f.endswith(tuple(extensions)) and
- not any(fnmatch.fnmatch(f, ignore) for ignore in ignores)):
- try:
- yield os.stat(os.path.join(root, f)).st_mtime
- except OSError as e:
- logger.warning('Caught Exception: %s', e)
+ if f.endswith(tuple(extensions)) and \
+ not any(fnmatch.fnmatch(f, ignore) for ignore in ignores):
+ try:
+ yield os.stat(os.path.join(root, f)).st_mtime
+ except OSError as e:
+ logger.warning('Caught Exception: %s', e)
LAST_MTIME = 0
while True:
diff --git a/pelican/writers.py b/pelican/writers.py
index e90a0004..4df7b859 100644
--- a/pelican/writers.py
+++ b/pelican/writers.py
@@ -1,22 +1,24 @@
# -*- coding: utf-8 -*-
-from __future__ import with_statement, unicode_literals, print_function
-import six
+from __future__ import print_function, unicode_literals, with_statement
-import os
import logging
+import os
+
+from feedgenerator import Atom1Feed, Rss201rev2Feed
+
+from jinja2 import Markup
+
+import six
+from six.moves.urllib.parse import urlparse
+
+from pelican import signals
+from pelican.paginator import Paginator
+from pelican.utils import (get_relative_path, is_selected_for_writing,
+ path_to_url, set_date_tzinfo)
if not six.PY3:
from codecs import open
-from feedgenerator import Atom1Feed, Rss201rev2Feed
-from jinja2 import Markup
-from six.moves.urllib.parse import urlparse
-
-from pelican.paginator import Paginator
-from pelican.utils import (get_relative_path, path_to_url, set_date_tzinfo,
- is_selected_for_writing)
-from pelican import signals
-
logger = logging.getLogger(__name__)
@@ -119,10 +121,10 @@ class Writer(object):
feed.write(fp, 'utf-8')
logger.info('Writing %s', complete_path)
- signals.feed_written.send(complete_path, context=context, feed=feed)
+ signals.feed_written.send(
+ complete_path, context=context, feed=feed)
return feed
-
def write_file(self, name, template, context, relative_urls=False,
paginated=None, override_output=False, **kwargs):
"""Render the template and write the file.
@@ -139,9 +141,10 @@ class Writer(object):
:param **kwargs: additional variables to pass to the templates
"""
- if name is False or name == "" or\
- not is_selected_for_writing(self.settings,\
- os.path.join(self.output_path, name)):
+ if name is False or \
+ name == "" or \
+ not is_selected_for_writing(self.settings,
+ os.path.join(self.output_path, name)):
return
elif not name:
# other stuff, just return for now
@@ -169,7 +172,8 @@ class Writer(object):
def _get_localcontext(context, name, kwargs, relative_urls):
localcontext = context.copy()
- localcontext['localsiteurl'] = localcontext.get('localsiteurl', None)
+ localcontext['localsiteurl'] = localcontext.get(
+ 'localsiteurl', None)
if relative_urls:
relative_url = path_to_url(get_relative_path(name))
localcontext['SITEURL'] = relative_url
@@ -201,11 +205,13 @@ class Writer(object):
'%s_previous_page' % key: previous_page,
'%s_next_page' % key: next_page})
- localcontext = _get_localcontext(context, page.save_as, paginated_kwargs, relative_urls)
+ localcontext = _get_localcontext(
+ context, page.save_as, paginated_kwargs, relative_urls)
_write_file(template, localcontext, self.output_path,
page.save_as, override_output)
else:
# no pagination
- localcontext = _get_localcontext(context, name, kwargs, relative_urls)
+ localcontext = _get_localcontext(
+ context, name, kwargs, relative_urls)
_write_file(template, localcontext, self.output_path, name,
override_output)
diff --git a/tox.ini b/tox.ini
index ff16929e..56ad0c14 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py{27,33,34},docs
+envlist = py{27,33,34},docs,flake8
[testenv]
basepython =
@@ -27,3 +27,16 @@ deps =
changedir = docs
commands =
sphinx-build -W -b html -d {envtmpdir}/doctrees . _build/html
+
+[flake8]
+application-import-names = pelican
+import-order-style = cryptography
+
+[testenv:flake8]
+basepython = python2.7
+deps =
+ flake8 <= 2.4.1
+ git+https://github.com/public/flake8-import-order@2ac7052a4e02b4a8a0125a106d87465a3b9fd688
+commands =
+ flake8 --version
+ flake8 pelican
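
With this environment defined, the checks Travis runs via TOX_ENV=flake8 can be reproduced locally (assuming tox is installed and a python2.7 interpreter is available, matching the basepython pin above):

    $ tox -e flake8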