fulfil pep8 standard

commit 8993c55e6e by derwinlu, 2015-06-16 09:25:09 +02:00 (committed by winlu)
31 changed files with 1259 additions and 868 deletions
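
Nearly every hunk below applies the same handful of PEP 8 fixes: imports sorted and grouped (standard library, then third-party, then local), identity and membership idioms ('x is not None', "'key' not in d"), E501 line-length wrapping, and spacing around comments. A minimal sketch of the import layout the files converge on (module choice is illustrative, not taken from the diff):

    # -*- coding: utf-8 -*-
    from __future__ import print_function, unicode_literals

    import logging        # standard library first, alphabetically sorted
    import os

    import six            # third-party packages in their own group

    from pelican import signals  # local imports last

    logger = logging.getLogger(__name__)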

pelican/__init__.py

@@ -1,45 +1,41 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals

-import six
+import argparse
+import collections
+import locale
+import logging
 import os
 import re
 import sys
 import time
-import logging
-import argparse
-import locale
-import collections
+
+import six

 # pelican.log has to be the first pelican module to be loaded
 # because logging.setLoggerClass has to be called before logging.getLogger
-from pelican.log import init
+from pelican.log import init  # noqa
 from pelican import signals
 from pelican.generators import (ArticlesGenerator, PagesGenerator,
-                                StaticGenerator, SourceFileGenerator,
+                                SourceFileGenerator, StaticGenerator,
                                 TemplatePagesGenerator)
 from pelican.readers import Readers
 from pelican.settings import read_settings
-from pelican.utils import (clean_output_dir, folder_watcher,
-                           file_watcher, maybe_pluralize)
+from pelican.utils import (clean_output_dir, file_watcher,
+                           folder_watcher, maybe_pluralize)
 from pelican.writers import Writer

 __version__ = "3.6.4.dev0"

 DEFAULT_CONFIG_NAME = 'pelicanconf.py'
 logger = logging.getLogger(__name__)


 class Pelican(object):
     def __init__(self, settings):
-        """
-        Pelican initialisation, performs some checks on the environment before
-        doing anything else.
+        """Pelican initialisation
+
+        Performs some checks on the environment before doing anything else.
         """
         # define the default settings
@@ -152,7 +148,7 @@ class Pelican(object):
         context = self.settings.copy()
         # Share these among all the generators and content objects:
-        context['filenames'] = {} # maps source path to Content object or None
+        context['filenames'] = {}  # maps source path to Content object or None
         context['localsiteurl'] = self.settings['SITEURL']

         generators = [
             cls(
@@ -190,23 +186,23 @@ class Pelican(object):
                                if isinstance(g, PagesGenerator))

         pluralized_articles = maybe_pluralize(
-            len(articles_generator.articles) +
-            len(articles_generator.translations),
+            (len(articles_generator.articles) +
+             len(articles_generator.translations)),
             'article',
             'articles')
         pluralized_drafts = maybe_pluralize(
-            len(articles_generator.drafts) +
-            len(articles_generator.drafts_translations),
+            (len(articles_generator.drafts) +
+             len(articles_generator.drafts_translations)),
             'draft',
             'drafts')
         pluralized_pages = maybe_pluralize(
-            len(pages_generator.pages) +
-            len(pages_generator.translations),
+            (len(pages_generator.pages) +
+             len(pages_generator.translations)),
             'page',
             'pages')
         pluralized_hidden_pages = maybe_pluralize(
-            len(pages_generator.hidden_pages) +
-            len(pages_generator.hidden_translations),
+            (len(pages_generator.hidden_pages) +
+             len(pages_generator.hidden_translations)),
             'hidden page',
             'hidden pages')
@@ -243,8 +239,8 @@ class Pelican(object):
         return generators

     def get_writer(self):
-        writers = [ w for (_, w) in signals.get_writer.send(self)
-                    if isinstance(w, type) ]
+        writers = [w for (_, w) in signals.get_writer.send(self)
+                   if isinstance(w, type)]
         writers_found = len(writers)
         if writers_found == 0:
             return Writer(self.output_path, settings=self.settings)
@@ -254,15 +250,15 @@ class Pelican(object):
             logger.debug('Found writer: %s', writer)
         else:
             logger.warning(
-                    '%s writers found, using only first one: %s',
-                    writers_found, writer)
+                '%s writers found, using only first one: %s',
+                writers_found, writer)
         return writer(self.output_path, settings=self.settings)


 def parse_arguments():
     parser = argparse.ArgumentParser(
-        description="""A tool to generate a static blog,
-        with restructured text input files.""",
+        description='A tool to generate a static blog, '
+                    ' with restructured text input files.',
         formatter_class=argparse.ArgumentDefaultsHelpFormatter
     )
@@ -354,7 +350,7 @@ def get_config(args):
    # argparse returns bytes in Py2. There is no definite answer as to which
    # encoding argparse (or sys.argv) uses.
    # "Best" option seems to be locale.getpreferredencoding()
-    # ref: http://mail.python.org/pipermail/python-list/2006-October/405766.html
+    # http://mail.python.org/pipermail/python-list/2006-October/405766.html
     if not six.PY3:
         enc = locale.getpreferredencoding()
         for key in config:
@@ -424,7 +420,8 @@ def main():
                     # Added static paths
                     # Add new watchers and set them as modified
-                    for static_path in set(new_static).difference(old_static):
+                    new_watchers = set(new_static).difference(old_static)
+                    for static_path in new_watchers:
                         static_key = '[static]%s' % static_path
                         watchers[static_key] = folder_watcher(
                             os.path.join(pelican.path, static_path),
@@ -434,7 +431,8 @@ def main():
                     # Removed static paths
                     # Remove watchers and modified values
-                    for static_path in set(old_static).difference(new_static):
+                    old_watchers = set(old_static).difference(new_static)
+                    for static_path in old_watchers:
                         static_key = '[static]%s' % static_path
                         watchers.pop(static_key)
                         modified.pop(static_key)
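
The autoreload bookkeeping in the last two hunks reduces to two set differences, pulled into named variables only to satisfy the line-length limit. A standalone sketch with invented path names:

    old_static = {'images', 'extra'}      # static paths before a settings reload
    new_static = {'images', 'downloads'}  # static paths after the reload

    added = set(new_static).difference(old_static)    # {'downloads'}: needs a watcher
    removed = set(old_static).difference(new_static)  # {'extra'}: its watcher is dropped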

pelican/cache.py

@@ -1,16 +1,14 @@
+# -*- coding: utf-8 -*-
 from __future__ import unicode_literals

 import hashlib
 import logging
 import os

-try:
-    import cPickle as pickle
-except:
-    import pickle
+from six.moves import cPickle as pickle

 from pelican.utils import mkdir_p

 logger = logging.getLogger(__name__)
@@ -83,6 +81,7 @@ class FileStampDataCacher(FileDataCacher):
         """This sublcass additionally sets filestamp function
         and base path for filestamping operations
         """
+
         super(FileStampDataCacher, self).__init__(settings, cache_name,
                                                   caching_policy,
                                                   load_policy)
@@ -118,6 +117,7 @@ class FileStampDataCacher(FileDataCacher):
         a hash for a function name in the hashlib module
         or an empty bytes string otherwise
         """
+
         try:
             return self._filestamp_func(filename)
         except (IOError, OSError, TypeError) as err:
@@ -133,6 +133,7 @@ class FileStampDataCacher(FileDataCacher):
         Modification is checked by comparing the cached
         and current file stamp.
         """
+
         stamp, data = super(FileStampDataCacher, self).get_cached_data(
             filename, (None, default))
         if stamp != self._get_file_stamp(filename):
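
The import swap above works because six.moves.cPickle resolves to the C-accelerated cPickle module on Python 2 and to the plain pickle module on Python 3, so the old try/except dance becomes unnecessary. A small sketch (sample data invented):

    from six.moves import cPickle as pickle

    cache = {'content/post.rst': ('stamp', None)}
    blob = pickle.dumps(cache, protocol=2)  # protocol 2 is readable on both majors
    assert pickle.loads(blob) == cache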

pelican/contents.py

@@ -1,23 +1,24 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals

-import six
-from six.moves.urllib.parse import urlparse, urlunparse
 import copy
+import functools
 import locale
 import logging
-import functools
 import os
 import re
 import sys

 import pytz

+import six
+from six.moves.urllib.parse import urlparse, urlunparse
+
 from pelican import signals
 from pelican.settings import DEFAULT_CONFIG
-from pelican.utils import (slugify, truncate_html_words, memoized, strftime,
-                           python_2_unicode_compatible, deprecated_attribute,
-                           path_to_url, posixize_path, set_date_tzinfo, SafeDatetime)
+from pelican.utils import (SafeDatetime, deprecated_attribute, memoized,
+                           path_to_url, posixize_path,
+                           python_2_unicode_compatible, set_date_tzinfo,
+                           slugify, strftime, truncate_html_words)

 # Import these so that they're avalaible when you import from pelican.contents.
 from pelican.urlwrappers import (URLWrapper, Author, Category, Tag)  # NOQA
@@ -66,7 +67,7 @@ class Content(object):
         # also keep track of the metadata attributes available
         self.metadata = local_metadata

-        #default template if it's not defined in page
+        # default template if it's not defined in page
         self.template = self._get_template()

         # First, read the authors from "authors", if not, fallback to "author"
@@ -94,13 +95,16 @@ class Content(object):
         # create the slug if not existing, generate slug according to
         # setting of SLUG_ATTRIBUTE
         if not hasattr(self, 'slug'):
-            if settings['SLUGIFY_SOURCE'] == 'title' and hasattr(self, 'title'):
+            if (settings['SLUGIFY_SOURCE'] == 'title' and
+                    hasattr(self, 'title')):
                 self.slug = slugify(self.title,
                                     settings.get('SLUG_SUBSTITUTIONS', ()))
-            elif settings['SLUGIFY_SOURCE'] == 'basename' and source_path != None:
-                basename = os.path.basename(os.path.splitext(source_path)[0])
-                self.slug = slugify(basename,
-                                    settings.get('SLUG_SUBSTITUTIONS', ()))
+            elif (settings['SLUGIFY_SOURCE'] == 'basename' and
+                    source_path is not None):
+                basename = os.path.basename(
+                    os.path.splitext(source_path)[0])
+                self.slug = slugify(
+                    basename, settings.get('SLUG_SUBSTITUTIONS', ()))

         self.source_path = source_path
@@ -233,7 +237,8 @@ class Content(object):
                 if isinstance(linked_content, Static):
                     linked_content.attach_to(self)
                 else:
-                    logger.warning("%s used {attach} link syntax on a "
-                                   "non-static file. Use {filename} instead.",
-                                   self.get_relative_source_path())
+                    logger.warning(
+                        "%s used {attach} link syntax on a "
+                        "non-static file. Use {filename} instead.",
+                        self.get_relative_source_path())
                 origin = '/'.join((siteurl, linked_content.url))
@@ -241,7 +246,7 @@ class Content(object):
             else:
                 logger.warning(
                     "Unable to find `%s`, skipping url replacement.",
-                    value.geturl(), extra = {
+                    value.geturl(), extra={
                         'limit_msg': ("Other resources were not found "
                                       "and their urls not replaced")})
         elif what == 'category':
@@ -250,9 +255,9 @@ class Content(object):
             origin = '/'.join((siteurl, Tag(path, self.settings).url))
         else:
             logger.warning(
-                    "Replacement Indicator '%s' not recognized, "
-                    "skipping replacement",
-                    what)
+                "Replacement Indicator '%s' not recognized, "
+                "skipping replacement",
+                what)

         # keep all other parts, such as query, fragment, etc.
         parts = list(value)
@@ -337,7 +342,9 @@ class Content(object):
         return posixize_path(
             os.path.relpath(
-                os.path.abspath(os.path.join(self.settings['PATH'], source_path)),
+                os.path.abspath(os.path.join(
+                    self.settings['PATH'],
+                    source_path)),
                 os.path.abspath(self.settings['PATH'])
             ))
@@ -402,9 +409,12 @@ class Static(Page):
     def attach_to(self, content):
         """Override our output directory with that of the given content object.
         """
-        # Determine our file's new output path relative to the linking document.
-        # If it currently lives beneath the linking document's source directory,
-        # preserve that relationship on output. Otherwise, make it a sibling.
+
+        # Determine our file's new output path relative to the linking
+        # document. If it currently lives beneath the linking
+        # document's source directory, preserve that relationship on output.
+        # Otherwise, make it a sibling.
         linking_source_dir = os.path.dirname(content.source_path)
         tail_path = os.path.relpath(self.source_path, linking_source_dir)
         if tail_path.startswith(os.pardir + os.sep):
@@ -420,11 +430,14 @@ class Static(Page):
         # 'some/content' with a file named 'index.html'.) Rather than trying
         # to figure it out by comparing the linking document's url and save_as
         # path, we simply build our new url from our new save_as path.
+
         new_url = path_to_url(new_save_as)

         def _log_reason(reason):
-            logger.warning("The {attach} link in %s cannot relocate %s "
-                "because %s. Falling back to {filename} link behavior instead.",
-                content.get_relative_source_path(),
-                self.get_relative_source_path(), reason,
-                extra={'limit_msg': "More {attach} warnings silenced."})
+            logger.warning(
+                "The {attach} link in %s cannot relocate "
+                "%s because %s. Falling back to "
+                "{filename} link behavior instead.",
+                content.get_relative_source_path(),
+                self.get_relative_source_path(), reason,
+                extra={'limit_msg': "More {attach} warnings silenced."})
@@ -452,5 +465,6 @@ def is_valid_content(content, f):
         content.check_properties()
         return True
     except NameError as e:
-        logger.error("Skipping %s: could not find information about '%s'", f, e)
+        logger.error(
+            "Skipping %s: could not find information about '%s'", f, e)
         return False

pelican/generators.py

@@ -1,28 +1,28 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals

-import os
-import six
-import logging
-import shutil
-import fnmatch
 import calendar
+import fnmatch
+import logging
+import os
+import shutil

 from codecs import open
 from collections import defaultdict
 from functools import partial
 from itertools import chain, groupby
 from operator import attrgetter

-from jinja2 import (Environment, FileSystemLoader, PrefixLoader, ChoiceLoader,
-                    BaseLoader, TemplateNotFound)
+from jinja2 import (BaseLoader, ChoiceLoader, Environment, FileSystemLoader,
+                    PrefixLoader, TemplateNotFound)

+import six

+from pelican import signals
 from pelican.cache import FileStampDataCacher
 from pelican.contents import Article, Draft, Page, Static, is_valid_content
 from pelican.readers import Readers
-from pelican.utils import (copy, process_translations, mkdir_p, DateFormatter,
-                           python_2_unicode_compatible, posixize_path)
-from pelican import signals
+from pelican.utils import (DateFormatter, copy, mkdir_p, posixize_path,
+                           process_translations, python_2_unicode_compatible)

 logger = logging.getLogger(__name__)
@@ -31,6 +31,7 @@ logger = logging.getLogger(__name__)
 class PelicanTemplateNotFound(Exception):
     pass

+
 @python_2_unicode_compatible
 class Generator(object):
     """Baseclass generator"""
@@ -90,8 +91,9 @@ class Generator(object):
         try:
             self._templates[name] = self.env.get_template(name + '.html')
         except TemplateNotFound:
-            raise PelicanTemplateNotFound('[templates] unable to load %s.html from %s'
-                                          % (name, self._templates_path))
+            raise PelicanTemplateNotFound(
+                '[templates] unable to load {}.html from {}'.format(
+                    name, self._templates_path))
         return self._templates[name]

     def _include_path(self, path, extensions=None):
@@ -105,7 +107,7 @@ class Generator(object):
             extensions = tuple(self.readers.extensions)
         basename = os.path.basename(path)

-        #check IGNORE_FILES
+        # check IGNORE_FILES
         ignores = self.settings['IGNORE_FILES']
         if any(fnmatch.fnmatch(basename, ignore) for ignore in ignores):
             return False
@@ -122,8 +124,9 @@ class Generator(object):
         :param extensions: the list of allowed extensions (if False, all
             extensions are allowed)
         """
+        # backward compatibility for older generators
         if isinstance(paths, six.string_types):
-            paths = [paths]  # backward compatibility for older generators
+            paths = [paths]

         # group the exclude dir names by parent path, for use with os.walk()
         exclusions_by_dirpath = {}
@@ -138,7 +141,8 @@ class Generator(object):
             root = os.path.join(self.path, path) if path else self.path

             if os.path.isdir(root):
-                for dirpath, dirs, temp_files in os.walk(root, followlinks=True):
+                for dirpath, dirs, temp_files in os.walk(
+                        root, followlinks=True):
                     drop = []
                     excl = exclusions_by_dirpath.get(dirpath, ())
                     for d in dirs:
@@ -178,7 +182,8 @@ class Generator(object):
         before this method is called, even if they failed to process.)
         The path argument is expected to be relative to self.path.
         """
-        return posixize_path(os.path.normpath(path)) in self.context['filenames']
+        return (posixize_path(os.path.normpath(path))
+                in self.context['filenames'])

     def _update_context(self, items):
         """Update the context with the given items from the currrent
@@ -211,7 +216,8 @@ class CachingGenerator(Generator, FileStampDataCacher):
                            readers_cache_name=(cls_name + '-Readers'),
                            **kwargs)

-        cache_this_level = self.settings['CONTENT_CACHING_LAYER'] == 'generator'
+        cache_this_level = \
+            self.settings['CONTENT_CACHING_LAYER'] == 'generator'
         caching_policy = cache_this_level and self.settings['CACHE_CONTENT']
         load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE']
         FileStampDataCacher.__init__(self, self.settings, cls_name,
@@ -259,14 +265,14 @@ class ArticlesGenerator(CachingGenerator):

     def __init__(self, *args, **kwargs):
         """initialize properties"""
-        self.articles = [] # only articles in default language
+        self.articles = []  # only articles in default language
         self.translations = []
         self.dates = {}
         self.tags = defaultdict(list)
         self.categories = defaultdict(list)
         self.related_posts = []
         self.authors = defaultdict(list)
-        self.drafts = [] # only drafts in default language
+        self.drafts = []  # only drafts in default language
         self.drafts_translations = []
         super(ArticlesGenerator, self).__init__(*args, **kwargs)
         signals.article_generator_init.send(self)
@@ -282,8 +288,8 @@ class ArticlesGenerator(CachingGenerator):
             writer.write_feed(self.articles, self.context,
                               self.settings['FEED_RSS'], feed_type='rss')

-        if (self.settings.get('FEED_ALL_ATOM')
-                or self.settings.get('FEED_ALL_RSS')):
+        if (self.settings.get('FEED_ALL_ATOM') or
+                self.settings.get('FEED_ALL_RSS')):
             all_articles = list(self.articles)
             for article in self.articles:
                 all_articles.extend(article.translations)
@@ -322,8 +328,8 @@ class ArticlesGenerator(CachingGenerator):
                         self.settings['AUTHOR_FEED_RSS']
                         % auth.slug, feed_type='rss')

-        if (self.settings.get('TAG_FEED_ATOM')
-                or self.settings.get('TAG_FEED_RSS')):
+        if (self.settings.get('TAG_FEED_ATOM') or
+                self.settings.get('TAG_FEED_RSS')):
             for tag, arts in self.tags.items():
                 arts.sort(key=attrgetter('date'), reverse=True)
                 if self.settings.get('TAG_FEED_ATOM'):
@@ -336,8 +342,8 @@ class ArticlesGenerator(CachingGenerator):
                         self.settings['TAG_FEED_RSS'] % tag.slug,
                         feed_type='rss')

-        if (self.settings.get('TRANSLATION_FEED_ATOM')
-                or self.settings.get('TRANSLATION_FEED_RSS')):
+        if (self.settings.get('TRANSLATION_FEED_ATOM') or
+                self.settings.get('TRANSLATION_FEED_RSS')):
             translations_feeds = defaultdict(list)
             for article in chain(self.articles, self.translations):
                 translations_feeds[article.lang].append(article)
@@ -472,9 +478,9 @@ class ArticlesGenerator(CachingGenerator):
         """Generate drafts pages."""
         for draft in chain(self.drafts_translations, self.drafts):
             write(draft.save_as, self.get_template(draft.template),
-                self.context, article=draft, category=draft.category,
-                override_output=hasattr(draft, 'override_save_as'),
-                blog=True, all_articles=self.articles)
+                  self.context, article=draft, category=draft.category,
+                  override_output=hasattr(draft, 'override_save_as'),
+                  blog=True, all_articles=self.articles)

     def generate_pages(self, writer):
         """Generate the pages on the disk"""
@@ -503,7 +509,8 @@ class ArticlesGenerator(CachingGenerator):
                 exclude=self.settings['ARTICLE_EXCLUDES']):
             article_or_draft = self.get_cached_data(f, None)
             if article_or_draft is None:
-                #TODO needs overhaul, maybe nomad for read_file solution, unified behaviour
+                # TODO needs overhaul, maybe nomad for read_file
+                # solution, unified behaviour
                 try:
                     article_or_draft = self.readers.read_file(
                         base_path=self.path, path=f, content_class=Article,
@@ -513,7 +520,8 @@ class ArticlesGenerator(CachingGenerator):
                         context_signal=signals.article_generator_context,
                         context_sender=self)
                 except Exception as e:
-                    logger.error('Could not process %s\n%s', f, e,
-                                 exc_info=self.settings.get('DEBUG', False))
+                    logger.error(
+                        'Could not process %s\n%s', f, e,
+                        exc_info=self.settings.get('DEBUG', False))
                     self._add_failed_source_path(f)
                     continue
@@ -535,8 +543,9 @@ class ArticlesGenerator(CachingGenerator):
                     self.add_source_path(article_or_draft)
                     all_drafts.append(article_or_draft)
                 else:
-                    logger.error("Unknown status '%s' for file %s, skipping it.",
-                                 article_or_draft.status, f)
+                    logger.error(
+                        "Unknown status '%s' for file %s, skipping it.",
+                        article_or_draft.status, f)
                     self._add_failed_source_path(f)
                     continue
@@ -544,9 +553,9 @@ class ArticlesGenerator(CachingGenerator):
             self.add_source_path(article_or_draft)

-        self.articles, self.translations = process_translations(all_articles,
-            order_by=self.settings['ARTICLE_ORDER_BY'])
+        self.articles, self.translations = process_translations(
+            all_articles,
+            order_by=self.settings['ARTICLE_ORDER_BY'])
         self.drafts, self.drafts_translations = \
             process_translations(all_drafts)
@@ -615,7 +624,8 @@ class PagesGenerator(CachingGenerator):
                         context_signal=signals.page_generator_context,
                         context_sender=self)
                 except Exception as e:
-                    logger.error('Could not process %s\n%s', f, e,
-                                 exc_info=self.settings.get('DEBUG', False))
+                    logger.error(
+                        'Could not process %s\n%s', f, e,
+                        exc_info=self.settings.get('DEBUG', False))
                     self._add_failed_source_path(f)
                     continue
@@ -629,8 +639,9 @@ class PagesGenerator(CachingGenerator):
             elif page.status.lower() == "hidden":
                 hidden_pages.append(page)
             else:
-                logger.error("Unknown status '%s' for file %s, skipping it.",
-                             page.status, f)
+                logger.error(
+                    "Unknown status '%s' for file %s, skipping it.",
+                    page.status, f)
                 self._add_failed_source_path(f)
                 continue
@@ -638,10 +649,11 @@ class PagesGenerator(CachingGenerator):
             self.add_source_path(page)

-        self.pages, self.translations = process_translations(all_pages,
-            order_by=self.settings['PAGE_ORDER_BY'])
-        self.hidden_pages, self.hidden_translations = (
-            process_translations(hidden_pages))
+        self.pages, self.translations = process_translations(
+            all_pages,
+            order_by=self.settings['PAGE_ORDER_BY'])
+        self.hidden_pages, self.hidden_translations = \
+            process_translations(hidden_pages)

         self._update_context(('pages', 'hidden_pages'))

pelican/log.py

@@ -1,18 +1,18 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals

+import locale
+import logging
+import os
+import sys
+from collections import Mapping, defaultdict
+
+import six

 __all__ = [
     'init'
 ]

-import os
-import sys
-import logging
-import locale
-from collections import defaultdict, Mapping
-
-import six


 class BaseFormatter(logging.Formatter):
     def __init__(self, fmt=None, datefmt=None):
@@ -20,7 +20,8 @@ class BaseFormatter(logging.Formatter):
         super(BaseFormatter, self).__init__(fmt=FORMAT, datefmt=datefmt)

     def format(self, record):
-        record.__dict__['customlevelname'] = self._get_levelname(record.levelname)
+        customlevel = self._get_levelname(record.levelname)
+        record.__dict__['customlevelname'] = customlevel
         # format multiline messages 'nicely' to make it clear they are together
         record.msg = record.msg.replace('\n', '\n | ')
         return super(BaseFormatter, self).format(record)
@@ -132,13 +133,13 @@ class SafeLogger(logging.Logger):
     def _log(self, level, msg, args, exc_info=None, extra=None):
         # if the only argument is a Mapping, Logger uses that for formatting
         # format values for that case
-        if args and len(args)==1 and isinstance(args[0], Mapping):
+        if args and len(args) == 1 and isinstance(args[0], Mapping):
             args = ({k: self._decode_arg(v) for k, v in args[0].items()},)
         # otherwise, format each arg
         else:
             args = tuple(self._decode_arg(arg) for arg in args)
-        super(SafeLogger, self)._log(level, msg, args,
-                                     exc_info=exc_info, extra=extra)
+        super(SafeLogger, self)._log(
+            level, msg, args, exc_info=exc_info, extra=extra)

     def _decode_arg(self, arg):
         '''
@@ -175,8 +176,7 @@ def init(level=None, handler=logging.StreamHandler()):

     logger = logging.getLogger()

-    if (os.isatty(sys.stdout.fileno())
-            and not sys.platform.startswith('win')):
+    if os.isatty(sys.stdout.fileno()) and not sys.platform.startswith('win'):
         fmt = ANSIFormatter()
     else:
         fmt = TextFormatter()
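
The last hunk collapses the TTY check onto one line now that it fits within 79 columns. The underlying pattern, sketched standalone (format strings invented for illustration): use ANSI colour only when stdout is an interactive terminal on a platform that understands escape codes.

    import logging
    import os
    import sys

    def pick_formatter():
        # colour only for interactive, non-Windows terminals
        if os.isatty(sys.stdout.fileno()) and not sys.platform.startswith('win'):
            return logging.Formatter('\033[1m%(levelname)s\033[0m %(message)s')
        return logging.Formatter('%(levelname)s %(message)s')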

pelican/paginator.py

@@ -1,18 +1,15 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals

-import six
-
-# From django.core.paginator
-from collections import namedtuple
 import functools
 import logging
 import os
+from collections import namedtuple
 from math import ceil

+import six

 logger = logging.getLogger(__name__)

 PaginationRule = namedtuple(
     'PaginationRule',
     'min_page URL SAVE_AS',
@@ -143,7 +140,7 @@ class Page(object):
             'settings': self.settings,
             'base_name': os.path.dirname(self.name),
             'number_sep': '/',
-            'extension': self.extension, 
+            'extension': self.extension,
         }

         if self.number == 1:

pelican/readers.py

@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals

 import logging
 import os
@@ -9,24 +9,50 @@
 import docutils
 import docutils.core
 import docutils.io
 from docutils.writers.html4css1 import HTMLTranslator

 import six
+from six.moves.html_parser import HTMLParser

 # import the directives to have pygments support
 from pelican import rstdirectives  # NOQA
+from pelican import signals
+from pelican.cache import FileStampDataCacher
+from pelican.contents import Author, Category, Page, Tag
+from pelican.utils import SafeDatetime, get_date, pelican_open, posixize_path

 try:
     from markdown import Markdown
 except ImportError:
     Markdown = False  # NOQA

 try:
     from html import escape
 except ImportError:
     from cgi import escape

-from six.moves.html_parser import HTMLParser
-
-from pelican import signals
-from pelican.cache import FileStampDataCacher
-from pelican.contents import Page, Category, Tag, Author
-from pelican.utils import get_date, pelican_open, SafeDatetime, posixize_path
+# Metadata processors have no way to discard an unwanted value, so we have
+# them return this value instead to signal that it should be discarded later.
+# This means that _filter_discardable_metadata() must be called on processed
+# metadata dicts before use, to remove the items with the special value.
+_DISCARD = object()

+METADATA_PROCESSORS = {
+    'tags': lambda x, y: ([
+        Tag(tag, y)
+        for tag in ensure_metadata_list(x)
+    ] or _DISCARD),
+    'date': lambda x, y: get_date(x.replace('_', ' ')),
+    'modified': lambda x, y: get_date(x),
+    'status': lambda x, y: x.strip() or _DISCARD,
+    'category': lambda x, y: _process_if_nonempty(Category, x, y),
+    'author': lambda x, y: _process_if_nonempty(Author, x, y),
+    'authors': lambda x, y: ([
+        Author(author, y)
+        for author in ensure_metadata_list(x)
+    ] or _DISCARD),
+    'slug': lambda x, y: x.strip() or _DISCARD,
+}

+logger = logging.getLogger(__name__)


 def ensure_metadata_list(text):
     """Canonicalize the format of a list of authors or tags. This works
@@ -49,13 +75,6 @@ def ensure_metadata_list(text):
         return [v for v in (w.strip() for w in text) if v]


-# Metadata processors have no way to discard an unwanted value, so we have
-# them return this value instead to signal that it should be discarded later.
-# This means that _filter_discardable_metadata() must be called on processed
-# metadata dicts before use, to remove the items with the special value.
-_DISCARD = object()

 def _process_if_nonempty(processor, name, settings):
     """Removes extra whitespace from name and applies a metadata processor.
     If name is empty or all whitespace, returns _DISCARD instead.
@@ -64,28 +83,11 @@ def _process_if_nonempty(processor, name, settings):
     return processor(name, settings) if name else _DISCARD


-METADATA_PROCESSORS = {
-    'tags': lambda x, y: ([Tag(tag, y) for tag in ensure_metadata_list(x)]
-                          or _DISCARD),
-    'date': lambda x, y: get_date(x.replace('_', ' ')),
-    'modified': lambda x, y: get_date(x),
-    'status': lambda x, y: x.strip() or _DISCARD,
-    'category': lambda x, y: _process_if_nonempty(Category, x, y),
-    'author': lambda x, y: _process_if_nonempty(Author, x, y),
-    'authors': lambda x, y: ([Author(author, y)
-                              for author in ensure_metadata_list(x)]
-                             or _DISCARD),
-    'slug': lambda x, y: x.strip() or _DISCARD,
-}

 def _filter_discardable_metadata(metadata):
     """Return a copy of a dict, minus any items marked as discardable."""
     return {name: val for name, val in metadata.items() if val is not _DISCARD}

-logger = logging.getLogger(__name__)

 class BaseReader(object):
     """Base class to read files.
@@ -267,8 +269,10 @@ class MarkdownReader(BaseReader):
                     output[name] = self.process_metadata(name, summary)
                 elif name in METADATA_PROCESSORS:
                     if len(value) > 1:
-                        logger.warning('Duplicate definition of `%s` '
-                            'for %s. Using first one.', name, self._source_path)
+                        logger.warning(
+                            'Duplicate definition of `%s` '
+                            'for %s. Using first one.',
+                            name, self._source_path)
                     output[name] = self.process_metadata(name, value[0])
                 elif len(value) > 1:
                     # handle list metadata as list of string
@@ -380,7 +384,8 @@ class HTMLReader(BaseReader):
     def _handle_meta_tag(self, attrs):
         name = self._attr_value(attrs, 'name')
         if name is None:
-            attr_serialized = ', '.join(['{}="{}"'.format(k, v) for k, v in attrs])
+            attr_list = ['{}="{}"'.format(k, v) for k, v in attrs]
+            attr_serialized = ', '.join(attr_list)
             logger.warning("Meta tag in file %s does not have a 'name' "
                            "attribute, skipping. Attributes: %s",
                            self._filename, attr_serialized)
@@ -394,9 +399,9 @@ class HTMLReader(BaseReader):
                 "Meta tag attribute 'contents' used in file %s, should"
                 " be changed to 'content'",
                 self._filename,
-                extra={'limit_msg': ("Other files have meta tag "
-                                     "attribute 'contents' that should "
-                                     "be changed to 'content'")})
+                extra={'limit_msg': "Other files have meta tag "
+                                    "attribute 'contents' that should "
+                                    "be changed to 'content'"})

         if name == 'keywords':
             name = 'tags'
@@ -474,7 +479,8 @@ class Readers(FileStampDataCacher):
         path = os.path.abspath(os.path.join(base_path, path))
         source_path = posixize_path(os.path.relpath(path, base_path))
-        logger.debug('Read file %s -> %s',
-                     source_path, content_class.__name__)
+        logger.debug(
+            'Read file %s -> %s',
+            source_path, content_class.__name__)

         if not fmt:
@@ -486,7 +492,8 @@ class Readers(FileStampDataCacher):
                 'Pelican does not know how to parse %s', path)

         if preread_signal:
-            logger.debug('Signal %s.send(%s)',
-                         preread_signal.name, preread_sender)
+            logger.debug(
+                'Signal %s.send(%s)',
+                preread_signal.name, preread_sender)
             preread_signal.send(preread_sender)
@@ -527,7 +534,9 @@ class Readers(FileStampDataCacher):

             def typogrify_wrapper(text):
                 """Ensures ignore_tags feature is backward compatible"""
                 try:
-                    return typogrify(text, self.settings['TYPOGRIFY_IGNORE_TAGS'])
+                    return typogrify(
+                        text,
+                        self.settings['TYPOGRIFY_IGNORE_TAGS'])
                 except TypeError:
                     return typogrify(text)
@@ -539,8 +548,10 @@ class Readers(FileStampDataCacher):
                 metadata['summary'] = typogrify_wrapper(metadata['summary'])

         if context_signal:
-            logger.debug('Signal %s.send(%s, <metadata>)',
-                         context_signal.name, context_sender)
+            logger.debug(
+                'Signal %s.send(%s, <metadata>)',
+                context_signal.name,
+                context_sender)
             context_signal.send(context_sender, metadata=metadata)

         return content_class(content=content, metadata=metadata,
@@ -591,7 +602,8 @@ def default_metadata(settings=None, process=None):
             if process:
                 value = process('category', value)
             metadata['category'] = value
-        if settings.get('DEFAULT_DATE', None) and settings['DEFAULT_DATE'] != 'fs':
+        if settings.get('DEFAULT_DATE', None) and \
+                settings['DEFAULT_DATE'] != 'fs':
             metadata['date'] = SafeDatetime(*settings['DEFAULT_DATE'])
     return metadata
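
The relocated _DISCARD block documents a sentinel pattern: processors return a unique object() instead of an empty value, and a later filtering pass drops those entries. A self-contained sketch of the same idea (helper names invented):

    _DISCARD = object()  # unique sentinel; cannot collide with real metadata

    def process_status(value):
        return value.strip() or _DISCARD  # empty values are marked, not stored

    def filter_discardable(metadata):
        return {k: v for k, v in metadata.items() if v is not _DISCARD}

    meta = {'status': process_status('   '), 'title': 'Hello'}
    assert filter_discardable(meta) == {'title': 'Hello'}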

pelican/rstdirectives.py

@@ -1,13 +1,17 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals

+import re

 from docutils import nodes, utils
-from docutils.parsers.rst import directives, roles, Directive
-from pygments.formatters import HtmlFormatter
+from docutils.parsers.rst import Directive, directives, roles

 from pygments import highlight
-from pygments.lexers import get_lexer_by_name, TextLexer
-import re
+from pygments.formatters import HtmlFormatter
+from pygments.lexers import TextLexer, get_lexer_by_name

 import six

 import pelican.settings as pys

pelican/server.py

@@ -1,16 +1,18 @@
-from __future__ import print_function
+# -*- coding: utf-8 -*-
+from __future__ import print_function, unicode_literals

+import logging
 import os
 import sys
-import logging

+from six.moves import SimpleHTTPServer as srvmod
+from six.moves import socketserver

 try:
     from magic import from_file as magic_from_file
 except ImportError:
     magic_from_file = None

-from six.moves import SimpleHTTPServer as srvmod
-from six.moves import socketserver


 class ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler):
     SUFFIXES = ['', '.html', '/index.html']
@@ -54,12 +56,12 @@ if __name__ == '__main__':
     socketserver.TCPServer.allow_reuse_address = True
     try:
-        httpd = socketserver.TCPServer((SERVER, PORT), ComplexHTTPRequestHandler)
+        httpd = socketserver.TCPServer(
+            (SERVER, PORT), ComplexHTTPRequestHandler)
     except OSError as e:
         logging.error("Could not listen on port %s, server %s.", PORT, SERVER)
         sys.exit(getattr(e, 'exitcode', 1))

     logging.info("Serving at port %s, server %s.", PORT, SERVER)
     try:
         httpd.serve_forever()

pelican/settings.py

@@ -1,31 +1,32 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals

-import six
 import copy
 import inspect
-import os
 import locale
 import logging
+import os
+from os.path import isabs
+from posixpath import join as posix_join

+import six

+from pelican.log import LimitFilter

 try:
     # SourceFileLoader is the recommended way in 3.3+
     from importlib.machinery import SourceFileLoader
-    load_source = lambda name, path: SourceFileLoader(name, path).load_module()
+
+    def load_source(name, path):
+        return SourceFileLoader(name, path).load_module()
 except ImportError:
     # but it does not exist in 3.2-, so fall back to imp
     import imp
     load_source = imp.load_source

-from os.path import isabs
-from pelican.utils import posix_join
-from pelican.log import LimitFilter

 logger = logging.getLogger(__name__)

 DEFAULT_THEME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              'themes', 'notmyidea')
 DEFAULT_CONFIG = {
@@ -131,7 +132,7 @@ DEFAULT_CONFIG = {
     'LOAD_CONTENT_CACHE': False,
     'WRITE_SELECTED': [],
     'FORMATTED_FIELDS': ['summary'],
-    }
+}

 PYGMENTS_RST_OPTIONS = None
@@ -158,8 +159,20 @@ def read_settings(path=None, override=None):
                            "has been deprecated (should be a list)")
             local_settings['PLUGIN_PATHS'] = [local_settings['PLUGIN_PATHS']]
         elif local_settings['PLUGIN_PATHS'] is not None:
-            local_settings['PLUGIN_PATHS'] = [os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(path), pluginpath)))
-                                              if not isabs(pluginpath) else pluginpath for pluginpath in local_settings['PLUGIN_PATHS']]
+            def getabs(path, pluginpath):
+                if isabs(pluginpath):
+                    return pluginpath
+                else:
+                    path_dirname = os.path.dirname(path)
+                    path_joined = os.path.join(path_dirname, pluginpath)
+                    path_normed = os.path.normpath(path_joined)
+                    path_absolute = os.path.abspath(path_normed)
+                    return path_absolute
+
+            pluginpath_list = [getabs(path, pluginpath)
+                               for pluginpath
+                               in local_settings['PLUGIN_PATHS']]
+            local_settings['PLUGIN_PATHS'] = pluginpath_list
     else:
         local_settings = copy.deepcopy(DEFAULT_CONFIG)
@@ -199,13 +212,13 @@ def configure_settings(settings):
     settings.
     Also, specify the log messages to be ignored.
     """
-    if not 'PATH' in settings or not os.path.isdir(settings['PATH']):
+    if 'PATH' not in settings or not os.path.isdir(settings['PATH']):
         raise Exception('You need to specify a path containing the content'
                         ' (see pelican --help for more information)')

     # specify the log messages to be ignored
-    LimitFilter._ignore.update(set(settings.get('LOG_FILTER',
-                                                DEFAULT_CONFIG['LOG_FILTER'])))
+    log_filter = settings.get('LOG_FILTER', DEFAULT_CONFIG['LOG_FILTER'])
+    LimitFilter._ignore.update(set(log_filter))

     # lookup the theme in "pelican/themes" if the given one doesn't exist
     if not os.path.isdir(settings['THEME']):
@@ -223,19 +236,15 @@ def configure_settings(settings):
         settings['WRITE_SELECTED'] = [
             os.path.abspath(path) for path in
             settings.get('WRITE_SELECTED', DEFAULT_CONFIG['WRITE_SELECTED'])
-            ]
+        ]

     # standardize strings to lowercase strings
-    for key in [
-            'DEFAULT_LANG',
-            ]:
+    for key in ['DEFAULT_LANG']:
         if key in settings:
             settings[key] = settings[key].lower()

     # standardize strings to lists
-    for key in [
-            'LOCALE',
-            ]:
+    for key in ['LOCALE']:
         if key in settings and isinstance(settings[key], six.string_types):
             settings[key] = [settings[key]]
@@ -243,12 +252,13 @@ def configure_settings(settings):
     for key, types in [
             ('OUTPUT_SOURCES_EXTENSION', six.string_types),
             ('FILENAME_METADATA', six.string_types),
-            ]:
+    ]:
         if key in settings and not isinstance(settings[key], types):
             value = settings.pop(key)
-            logger.warn('Detected misconfigured %s (%s), '
-                        'falling back to the default (%s)',
-                        key, value, DEFAULT_CONFIG[key])
+            logger.warn(
+                'Detected misconfigured %s (%s), '
+                'falling back to the default (%s)',
+                key, value, DEFAULT_CONFIG[key])

     # try to set the different locales, fallback on the default.
     locales = settings.get('LOCALE', DEFAULT_CONFIG['LOCALE'])
@@ -270,16 +280,16 @@ def configure_settings(settings):
             logger.warning("Removed extraneous trailing slash from SITEURL.")

     # If SITEURL is defined but FEED_DOMAIN isn't,
     # set FEED_DOMAIN to SITEURL
-    if not 'FEED_DOMAIN' in settings:
+    if 'FEED_DOMAIN' not in settings:
         settings['FEED_DOMAIN'] = settings['SITEURL']

     # check content caching layer and warn of incompatibilities
-    if (settings.get('CACHE_CONTENT', False) and
-            settings.get('CONTENT_CACHING_LAYER', '') == 'generator' and
-            settings.get('WITH_FUTURE_DATES', DEFAULT_CONFIG['WITH_FUTURE_DATES'])):
-        logger.warning('WITH_FUTURE_DATES conflicts with '
-                       "CONTENT_CACHING_LAYER set to 'generator', "
-                       "use 'reader' layer instead")
+    if settings.get('CACHE_CONTENT', False) and \
+       settings.get('CONTENT_CACHING_LAYER', '') == 'generator' and \
+       settings.get('WITH_FUTURE_DATES', False):
+        logger.warning(
+            "WITH_FUTURE_DATES conflicts with CONTENT_CACHING_LAYER "
+            "set to 'generator', use 'reader' layer instead")

     # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined
     feed_keys = [
@@ -296,7 +306,7 @@ def configure_settings(settings):
             logger.warning('Feeds generated without SITEURL set properly may'
                            ' not be valid')

-    if not 'TIMEZONE' in settings:
+    if 'TIMEZONE' not in settings:
         logger.warning(
             'No timezone information specified in the settings. Assuming'
             ' your timezone is UTC for feed generation. Check '
@@ -321,7 +331,8 @@ def configure_settings(settings):
         old_key = key + '_DIR'
         new_key = key + '_PATHS'
         if old_key in settings:
-            logger.warning('Deprecated setting %s, moving it to %s list',
-                           old_key, new_key)
+            logger.warning(
+                'Deprecated setting %s, moving it to %s list',
+                old_key, new_key)
             settings[new_key] = [settings[old_key]]  # also make a list
             del settings[old_key]
@@ -365,8 +376,9 @@ def configure_settings(settings):
     for old, new, doc in [
             ('LESS_GENERATOR', 'the Webassets plugin', None),
             ('FILES_TO_COPY', 'STATIC_PATHS and EXTRA_PATH_METADATA',
-                'https://github.com/getpelican/pelican/blob/master/docs/settings.rst#path-metadata'),
-            ]:
+                'https://github.com/getpelican/pelican/'
+                'blob/master/docs/settings.rst#path-metadata'),
+    ]:
         if old in settings:
             message = 'The {} setting has been removed in favor of {}'.format(
                 old, new)
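
The lambda-to-def change in the first hunk is the fix for flake8's E731: an assigned lambda hides the function's name from tracebacks and profilers, while a def keeps it. A tiny illustration (function body invented):

    greet = lambda name: 'Hello ' + name  # flagged as E731; greet.__name__ == '<lambda>'

    def greet(name):                      # preferred form; greet.__name__ == 'greet'
        return 'Hello ' + name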

pelican/signals.py

@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals
+
 from blinker import signal

 # Run-level signals:

samples/pelican.conf.py

@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals

 AUTHOR = 'Alexis Métaireau'
 SITENAME = "Alexis' log"
 SITEURL = 'http://blog.notmyidea.org'
@@ -31,17 +31,16 @@ DEFAULT_METADATA = {'yeah': 'it is'}

 # path-specific metadata
 EXTRA_PATH_METADATA = {
     'extra/robots.txt': {'path': 'robots.txt'},
-    }
+}

 # static paths will be copied without parsing their contents
 STATIC_PATHS = [
     'pictures',
     'extra/robots.txt',
-    ]
+]

 FORMATTED_FIELDS = ['summary', 'custom_formatted_field']

 # foobar will not be used, because it's not in caps. All configuration keys
 # have to be in caps
 foobar = "barbaz"

pelican/tests/support.py

@@ -1,25 +1,26 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals

-__all__ = ['get_article', 'unittest', ]

+import locale
+import logging
 import os
 import re
 import subprocess
 import sys
-
-from six import StringIO
-import logging
-from logging.handlers import BufferingHandler
 import unittest
-import locale
-from functools import wraps
 from contextlib import contextmanager
-from tempfile import mkdtemp
+from functools import wraps
+from logging.handlers import BufferingHandler
 from shutil import rmtree
+from tempfile import mkdtemp

+from six import StringIO

 from pelican.contents import Article
 from pelican.settings import DEFAULT_CONFIG

+__all__ = ['get_article', 'unittest', ]


 @contextmanager
 def temporary_folder():
@@ -167,7 +168,7 @@ def get_settings(**kwargs):
     Set keyword arguments to override specific settings.
     """
     settings = DEFAULT_CONFIG.copy()
-    for key,value in kwargs.items():
+    for key, value in kwargs.items():
         settings[key] = value
     return settings
@@ -179,10 +180,13 @@ class LogCountHandler(BufferingHandler):
         logging.handlers.BufferingHandler.__init__(self, capacity)

     def count_logs(self, msg=None, level=None):
-        return len([l for l in self.buffer
-                    if (msg is None or re.match(msg, l.getMessage()))
-                    and (level is None or l.levelno == level)
-                    ])
+        return len([
+            l
+            for l
+            in self.buffer
+            if (msg is None or re.match(msg, l.getMessage())) and
+            (level is None or l.levelno == level)
+        ])


 class LoggedTestCase(unittest.TestCase):

pelican/tests/test_cache.py

@@ -1,7 +1,14 @@
+# -*- coding: utf-8 -*-
 from __future__ import unicode_literals

 import os
+from codecs import open
+from shutil import rmtree
+from tempfile import mkdtemp

+from pelican.generators import ArticlesGenerator, PagesGenerator
+from pelican.tests.support import get_settings, unittest

 try:
     from unittest.mock import MagicMock
 except ImportError:
@@ -10,12 +17,6 @@ except ImportError:
     except ImportError:
         MagicMock = False

-from shutil import rmtree
-from tempfile import mkdtemp

-from pelican.generators import ArticlesGenerator, PagesGenerator
-from pelican.tests.support import unittest, get_settings

 CUR_DIR = os.path.dirname(__file__)
 CONTENT_DIR = os.path.join(CUR_DIR, 'content')
@@ -35,7 +36,6 @@ class TestCache(unittest.TestCase):
         settings['CACHE_PATH'] = self.temp_cache
         return settings

-
     @unittest.skipUnless(MagicMock, 'Needs Mock module')
     def test_article_object_caching(self):
         """Test Article objects caching at the generator level"""
@@ -44,7 +44,6 @@ class TestCache(unittest.TestCase):
         settings['DEFAULT_DATE'] = (1970, 1, 1)
         settings['READERS'] = {'asc': None}

-
         generator = ArticlesGenerator(
             context=settings.copy(), settings=settings,
             path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
@@ -108,7 +107,9 @@ class TestCache(unittest.TestCase):
             path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
         generator.readers.read_file = MagicMock()
         generator.generate_context()
-        self.assertEqual(generator.readers.read_file.call_count, orig_call_count)
+        self.assertEqual(
+            generator.readers.read_file.call_count,
+            orig_call_count)

     @unittest.skipUnless(MagicMock, 'Needs Mock module')
     def test_page_object_caching(self):
@@ -181,5 +182,6 @@ class TestCache(unittest.TestCase):
             path=CUR_DIR, theme=settings['THEME'], output_path=None)
         generator.readers.read_file = MagicMock()
         generator.generate_context()
-        self.assertEqual(generator.readers.read_file.call_count, orig_call_count)
+        self.assertEqual(
+            generator.readers.read_file.call_count,
+            orig_call_count)
View file
@ -1,20 +1,21 @@
from __future__ import unicode_literals, absolute_import
import logging
import locale
import os.path
import six
from jinja2.utils import generate_lorem_ipsum
from sys import platform
from pelican.contents import (Page, Article, Static, URLWrapper,
                              Author, Category)
from pelican.settings import DEFAULT_CONFIG
from pelican.signals import content_object_init
from pelican.tests.support import LoggedTestCase, mute, unittest, get_settings
from pelican.utils import (path_to_url, truncate_html_words, SafeDatetime,
                           posix_join)
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import locale
import logging
import os.path
from posixpath import join as posix_join
from sys import platform
from jinja2.utils import generate_lorem_ipsum
import six
from pelican.contents import Article, Author, Category, Page, Static
from pelican.settings import DEFAULT_CONFIG
from pelican.signals import content_object_init
from pelican.tests.support import LoggedTestCase, get_settings, unittest
from pelican.utils import SafeDatetime, path_to_url, truncate_html_words
# generate one paragraph, enclosed with <p> # generate one paragraph, enclosed with <p>
@ -49,7 +50,7 @@ class TestPage(unittest.TestCase):
# them to initialise object's attributes. # them to initialise object's attributes.
metadata = {'foo': 'bar', 'foobar': 'baz', 'title': 'foobar', } metadata = {'foo': 'bar', 'foobar': 'baz', 'title': 'foobar', }
page = Page(TEST_CONTENT, metadata=metadata, page = Page(TEST_CONTENT, metadata=metadata,
context={'localsiteurl': ''}) context={'localsiteurl': ''})
for key, value in metadata.items(): for key, value in metadata.items():
self.assertTrue(hasattr(page, key)) self.assertTrue(hasattr(page, key))
self.assertEqual(value, getattr(page, key)) self.assertEqual(value, getattr(page, key))
@ -139,14 +140,9 @@ class TestPage(unittest.TestCase):
page = Page(**page_kwargs) page = Page(**page_kwargs)
# page.locale_date is a unicode string in both python2 and python3 # page.locale_date is a unicode string in both python2 and python3
dt_date = dt.strftime(DEFAULT_CONFIG['DEFAULT_DATE_FORMAT']) dt_date = dt.strftime(DEFAULT_CONFIG['DEFAULT_DATE_FORMAT'])
# dt_date is a byte string in python2, and a unicode string in python3
# Let's make sure it is a unicode string (relies on python 3.3 supporting the u prefix)
if type(dt_date) != type(u''):
# python2:
dt_date = unicode(dt_date, 'utf8')
self.assertEqual(page.locale_date, dt_date ) self.assertEqual(page.locale_date, dt_date)
page_kwargs['settings'] = get_settings() page_kwargs['settings'] = get_settings()
# I doubt this can work on all platforms ... # I doubt this can work on all platforms ...
@ -307,10 +303,14 @@ class TestPage(unittest.TestCase):
args['settings'] = get_settings() args['settings'] = get_settings()
args['source_path'] = 'content' args['source_path'] = 'content'
args['context']['filenames'] = {
    'images/poster.jpg': type(cls_name, (object,), {'url': 'images/poster.jpg'}),
    'assets/video.mp4': type(cls_name, (object,), {'url': 'assets/video.mp4'}),
    'images/graph.svg': type(cls_name, (object,), {'url': 'images/graph.svg'}),
    'reference.rst': type(cls_name, (object,), {'url': 'reference.html'}),
}
args['context']['filenames'] = {
    'images/poster.jpg': type(
        cls_name, (object,), {'url': 'images/poster.jpg'}),
    'assets/video.mp4': type(
        cls_name, (object,), {'url': 'assets/video.mp4'}),
    'images/graph.svg': type(
        cls_name, (object,), {'url': 'images/graph.svg'}),
    'reference.rst': type(
        cls_name, (object,), {'url': 'reference.html'}),
}
# video.poster # video.poster
@ -325,20 +325,25 @@ class TestPage(unittest.TestCase):
content, content,
'There is a video with poster ' 'There is a video with poster '
'<video controls poster="http://notmyidea.org/images/poster.jpg">' '<video controls poster="http://notmyidea.org/images/poster.jpg">'
'<source src="http://notmyidea.org/assets/video.mp4" type="video/mp4">' '<source src="http://notmyidea.org/assets/video.mp4"'
' type="video/mp4">'
'</video>' '</video>'
) )
# object.data # object.data
args['content'] = ( args['content'] = (
'There is a svg object ' 'There is a svg object '
'<object data="{filename}/images/graph.svg" type="image/svg+xml"></object>' '<object data="{filename}/images/graph.svg"'
' type="image/svg+xml">'
'</object>'
) )
content = Page(**args).get_content('http://notmyidea.org') content = Page(**args).get_content('http://notmyidea.org')
self.assertEqual( self.assertEqual(
content, content,
'There is a svg object ' 'There is a svg object '
'<object data="http://notmyidea.org/images/graph.svg" type="image/svg+xml"></object>' '<object data="http://notmyidea.org/images/graph.svg"'
' type="image/svg+xml">'
'</object>'
) )
# blockquote.cite # blockquote.cite
@ -350,7 +355,9 @@ class TestPage(unittest.TestCase):
self.assertEqual( self.assertEqual(
content, content,
'There is a blockquote with cite attribute ' 'There is a blockquote with cite attribute '
'<blockquote cite="http://notmyidea.org/reference.html">blah blah</blockquote>' '<blockquote cite="http://notmyidea.org/reference.html">'
'blah blah'
'</blockquote>'
) )
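The long HTML strings in the hunks above are split using adjacent string literals, which the compiler concatenates into a single constant, so the PEP 8-friendly form is byte-for-byte identical at runtime. A quick self-contained check of that equivalence (the markup is a made-up stand-in):

    # Adjacent literals inside parentheses are joined at compile time.
    one_line = '<object data="graph.svg" type="image/svg+xml"></object>'
    wrapped = ('<object data="graph.svg"'
               ' type="image/svg+xml">'
               '</object>')
    assert one_line == wrapped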
def test_intrasite_link_markdown_spaces(self): def test_intrasite_link_markdown_spaces(self):
@ -401,17 +408,19 @@ class TestArticle(TestPage):
def test_slugify_category_author(self): def test_slugify_category_author(self):
settings = get_settings() settings = get_settings()
settings['SLUG_SUBSTITUTIONS'] = [ ('C#', 'csharp') ] settings['SLUG_SUBSTITUTIONS'] = [('C#', 'csharp')]
settings['ARTICLE_URL'] = '{author}/{category}/{slug}/' settings['ARTICLE_URL'] = '{author}/{category}/{slug}/'
settings['ARTICLE_SAVE_AS'] = '{author}/{category}/{slug}/index.html' settings['ARTICLE_SAVE_AS'] = '{author}/{category}/{slug}/index.html'
article_kwargs = self._copy_page_kwargs() article_kwargs = self._copy_page_kwargs()
article_kwargs['metadata']['author'] = Author("O'Brien", settings) article_kwargs['metadata']['author'] = Author("O'Brien", settings)
article_kwargs['metadata']['category'] = Category('C# & stuff', settings) article_kwargs['metadata']['category'] = Category(
'C# & stuff', settings)
article_kwargs['metadata']['title'] = 'fnord' article_kwargs['metadata']['title'] = 'fnord'
article_kwargs['settings'] = settings article_kwargs['settings'] = settings
article = Article(**article_kwargs) article = Article(**article_kwargs)
self.assertEqual(article.url, 'obrien/csharp-stuff/fnord/') self.assertEqual(article.url, 'obrien/csharp-stuff/fnord/')
self.assertEqual(article.save_as, 'obrien/csharp-stuff/fnord/index.html') self.assertEqual(
article.save_as, 'obrien/csharp-stuff/fnord/index.html')
class TestStatic(LoggedTestCase): class TestStatic(LoggedTestCase):
@ -426,7 +435,8 @@ class TestStatic(LoggedTestCase):
self.context = self.settings.copy() self.context = self.settings.copy()
self.static = Static(content=None, metadata={}, settings=self.settings, self.static = Static(content=None, metadata={}, settings=self.settings,
source_path=posix_join('dir', 'foo.jpg'), context=self.context) source_path=posix_join('dir', 'foo.jpg'),
context=self.context)
self.context['filenames'] = {self.static.source_path: self.static} self.context['filenames'] = {self.static.source_path: self.static}
@ -436,8 +446,10 @@ class TestStatic(LoggedTestCase):
def test_attach_to_same_dir(self): def test_attach_to_same_dir(self):
"""attach_to() overrides a static file's save_as and url. """attach_to() overrides a static file's save_as and url.
""" """
page = Page(content="fake page", page = Page(
metadata={'title': 'fakepage'}, settings=self.settings, content="fake page",
metadata={'title': 'fakepage'},
settings=self.settings,
source_path=os.path.join('dir', 'fakepage.md')) source_path=os.path.join('dir', 'fakepage.md'))
self.static.attach_to(page) self.static.attach_to(page)
@ -449,7 +461,7 @@ class TestStatic(LoggedTestCase):
"""attach_to() preserves dirs inside the linking document dir. """attach_to() preserves dirs inside the linking document dir.
""" """
page = Page(content="fake page", metadata={'title': 'fakepage'}, page = Page(content="fake page", metadata={'title': 'fakepage'},
settings=self.settings, source_path='fakepage.md') settings=self.settings, source_path='fakepage.md')
self.static.attach_to(page) self.static.attach_to(page)
expected_save_as = os.path.join('outpages', 'dir', 'foo.jpg') expected_save_as = os.path.join('outpages', 'dir', 'foo.jpg')
@ -460,8 +472,8 @@ class TestStatic(LoggedTestCase):
"""attach_to() ignores dirs outside the linking document dir. """attach_to() ignores dirs outside the linking document dir.
""" """
page = Page(content="fake page", page = Page(content="fake page",
metadata={'title': 'fakepage'}, settings=self.settings, metadata={'title': 'fakepage'}, settings=self.settings,
source_path=os.path.join('dir', 'otherdir', 'fakepage.md')) source_path=os.path.join('dir', 'otherdir', 'fakepage.md'))
self.static.attach_to(page) self.static.attach_to(page)
expected_save_as = os.path.join('outpages', 'foo.jpg') expected_save_as = os.path.join('outpages', 'foo.jpg')
@ -472,8 +484,8 @@ class TestStatic(LoggedTestCase):
"""attach_to() does nothing when called a second time. """attach_to() does nothing when called a second time.
""" """
page = Page(content="fake page", page = Page(content="fake page",
metadata={'title': 'fakepage'}, settings=self.settings, metadata={'title': 'fakepage'}, settings=self.settings,
source_path=os.path.join('dir', 'fakepage.md')) source_path=os.path.join('dir', 'fakepage.md'))
self.static.attach_to(page) self.static.attach_to(page)
@ -481,8 +493,10 @@ class TestStatic(LoggedTestCase):
otherdir_settings.update(dict( otherdir_settings.update(dict(
PAGE_SAVE_AS=os.path.join('otherpages', '{slug}.html'), PAGE_SAVE_AS=os.path.join('otherpages', '{slug}.html'),
PAGE_URL='otherpages/{slug}.html')) PAGE_URL='otherpages/{slug}.html'))
otherdir_page = Page(content="other page", otherdir_page = Page(
metadata={'title': 'otherpage'}, settings=otherdir_settings, content="other page",
metadata={'title': 'otherpage'},
settings=otherdir_settings,
source_path=os.path.join('dir', 'otherpage.md')) source_path=os.path.join('dir', 'otherpage.md'))
self.static.attach_to(otherdir_page) self.static.attach_to(otherdir_page)
@ -497,8 +511,10 @@ class TestStatic(LoggedTestCase):
""" """
original_save_as = self.static.save_as original_save_as = self.static.save_as
page = Page(content="fake page", page = Page(
metadata={'title': 'fakepage'}, settings=self.settings, content="fake page",
metadata={'title': 'fakepage'},
settings=self.settings,
source_path=os.path.join('dir', 'fakepage.md')) source_path=os.path.join('dir', 'fakepage.md'))
self.static.attach_to(page) self.static.attach_to(page)
@ -511,8 +527,10 @@ class TestStatic(LoggedTestCase):
""" """
original_url = self.static.url original_url = self.static.url
page = Page(content="fake page", page = Page(
metadata={'title': 'fakepage'}, settings=self.settings, content="fake page",
metadata={'title': 'fakepage'},
settings=self.settings,
source_path=os.path.join('dir', 'fakepage.md')) source_path=os.path.join('dir', 'fakepage.md'))
self.static.attach_to(page) self.static.attach_to(page)
@ -523,13 +541,15 @@ class TestStatic(LoggedTestCase):
"""attach_to() does not override paths that were overridden elsewhere. """attach_to() does not override paths that were overridden elsewhere.
(For example, by the user with EXTRA_PATH_METADATA) (For example, by the user with EXTRA_PATH_METADATA)
""" """
customstatic = Static(content=None, customstatic = Static(
content=None,
metadata=dict(save_as='customfoo.jpg', url='customfoo.jpg'), metadata=dict(save_as='customfoo.jpg', url='customfoo.jpg'),
settings=self.settings, settings=self.settings,
source_path=os.path.join('dir', 'foo.jpg'), source_path=os.path.join('dir', 'foo.jpg'),
context=self.settings.copy()) context=self.settings.copy())
page = Page(content="fake page", page = Page(
content="fake page",
metadata={'title': 'fakepage'}, settings=self.settings, metadata={'title': 'fakepage'}, settings=self.settings,
source_path=os.path.join('dir', 'fakepage.md')) source_path=os.path.join('dir', 'fakepage.md'))
@ -542,13 +562,16 @@ class TestStatic(LoggedTestCase):
"""{attach} link syntax triggers output path override & url replacement. """{attach} link syntax triggers output path override & url replacement.
""" """
html = '<a href="{attach}../foo.jpg">link</a>' html = '<a href="{attach}../foo.jpg">link</a>'
page = Page(content=html, page = Page(
metadata={'title': 'fakepage'}, settings=self.settings, content=html,
metadata={'title': 'fakepage'},
settings=self.settings,
source_path=os.path.join('dir', 'otherdir', 'fakepage.md'), source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
context=self.context) context=self.context)
content = page.get_content('') content = page.get_content('')
self.assertNotEqual(content, html, self.assertNotEqual(
content, html,
"{attach} link syntax did not trigger URL replacement.") "{attach} link syntax did not trigger URL replacement.")
expected_save_as = os.path.join('outpages', 'foo.jpg') expected_save_as = os.path.join('outpages', 'foo.jpg')
@ -561,7 +584,8 @@ class TestStatic(LoggedTestCase):
html = '<a href="{tag}foo">link</a>' html = '<a href="{tag}foo">link</a>'
page = Page( page = Page(
content=html, content=html,
metadata={'title': 'fakepage'}, settings=self.settings, metadata={'title': 'fakepage'},
settings=self.settings,
source_path=os.path.join('dir', 'otherdir', 'fakepage.md'), source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
context=self.context) context=self.context)
content = page.get_content('') content = page.get_content('')
@ -572,8 +596,10 @@ class TestStatic(LoggedTestCase):
"{category} link syntax triggers url replacement." "{category} link syntax triggers url replacement."
html = '<a href="{category}foo">link</a>' html = '<a href="{category}foo">link</a>'
page = Page(content=html, page = Page(
metadata={'title': 'fakepage'}, settings=self.settings, content=html,
metadata={'title': 'fakepage'},
settings=self.settings,
source_path=os.path.join('dir', 'otherdir', 'fakepage.md'), source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
context=self.context) context=self.context)
content = page.get_content('') content = page.get_content('')
@ -588,11 +614,11 @@ class TestStatic(LoggedTestCase):
metadata={'title': 'fakepage'}, settings=self.settings, metadata={'title': 'fakepage'}, settings=self.settings,
source_path=os.path.join('dir', 'otherdir', 'fakepage.md'), source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
context=self.context) context=self.context)
content = page.get_content('') content = page.get_content('')
self.assertEqual(content, html) self.assertEqual(content, html)
self.assertLogCountEqual( self.assertLogCountEqual(
count=1, count=1,
msg="Replacement Indicator 'unknown' not recognized, " msg="Replacement Indicator 'unknown' not recognized, "
"skipping replacement", "skipping replacement",
level=logging.WARNING) level=logging.WARNING)
View file
@ -1,8 +1,18 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals from __future__ import unicode_literals
import locale
import os import os
from codecs import open from codecs import open
from shutil import rmtree
from tempfile import mkdtemp
from pelican.generators import (ArticlesGenerator, Generator, PagesGenerator,
StaticGenerator, TemplatePagesGenerator)
from pelican.tests.support import get_settings, unittest
from pelican.writers import Writer
try: try:
from unittest.mock import MagicMock from unittest.mock import MagicMock
except ImportError: except ImportError:
@ -10,14 +20,7 @@ except ImportError:
from mock import MagicMock from mock import MagicMock
except ImportError: except ImportError:
MagicMock = False MagicMock = False
from shutil import rmtree
from tempfile import mkdtemp
from pelican.generators import (Generator, ArticlesGenerator, PagesGenerator,
StaticGenerator, TemplatePagesGenerator)
from pelican.writers import Writer
from pelican.tests.support import unittest, get_settings
import locale
CUR_DIR = os.path.dirname(__file__) CUR_DIR = os.path.dirname(__file__)
CONTENT_DIR = os.path.join(CUR_DIR, 'content') CONTENT_DIR = os.path.join(CUR_DIR, 'content')
@ -35,7 +38,6 @@ class TestGenerator(unittest.TestCase):
def tearDown(self): def tearDown(self):
locale.setlocale(locale.LC_ALL, self.old_locale) locale.setlocale(locale.LC_ALL, self.old_locale)
def test_include_path(self): def test_include_path(self):
self.settings['IGNORE_FILES'] = {'ignored1.rst', 'ignored2.rst'} self.settings['IGNORE_FILES'] = {'ignored1.rst', 'ignored2.rst'}
@ -52,7 +54,8 @@ class TestGenerator(unittest.TestCase):
"""Test that Generator.get_files() properly excludes directories. """Test that Generator.get_files() properly excludes directories.
""" """
# We use our own Generator so we can give it our own content path # We use our own Generator so we can give it our own content path
generator = Generator(context=self.settings.copy(), generator = Generator(
context=self.settings.copy(),
settings=self.settings, settings=self.settings,
path=os.path.join(CUR_DIR, 'nested_content'), path=os.path.join(CUR_DIR, 'nested_content'),
theme=self.settings['THEME'], output_path=None) theme=self.settings['THEME'], output_path=None)
@ -60,34 +63,42 @@ class TestGenerator(unittest.TestCase):
filepaths = generator.get_files(paths=['maindir']) filepaths = generator.get_files(paths=['maindir'])
found_files = {os.path.basename(f) for f in filepaths} found_files = {os.path.basename(f) for f in filepaths}
expected_files = {'maindir.md', 'subdir.md'} expected_files = {'maindir.md', 'subdir.md'}
self.assertFalse(expected_files - found_files, self.assertFalse(
expected_files - found_files,
"get_files() failed to find one or more files") "get_files() failed to find one or more files")
# Test string as `paths` argument rather than list # Test string as `paths` argument rather than list
filepaths = generator.get_files(paths='maindir') filepaths = generator.get_files(paths='maindir')
found_files = {os.path.basename(f) for f in filepaths} found_files = {os.path.basename(f) for f in filepaths}
expected_files = {'maindir.md', 'subdir.md'} expected_files = {'maindir.md', 'subdir.md'}
self.assertFalse(expected_files - found_files, self.assertFalse(
expected_files - found_files,
"get_files() failed to find one or more files") "get_files() failed to find one or more files")
filepaths = generator.get_files(paths=[''], exclude=['maindir']) filepaths = generator.get_files(paths=[''], exclude=['maindir'])
found_files = {os.path.basename(f) for f in filepaths} found_files = {os.path.basename(f) for f in filepaths}
self.assertNotIn('maindir.md', found_files, self.assertNotIn(
'maindir.md', found_files,
"get_files() failed to exclude a top-level directory") "get_files() failed to exclude a top-level directory")
self.assertNotIn('subdir.md', found_files, self.assertNotIn(
'subdir.md', found_files,
"get_files() failed to exclude a subdir of an excluded directory") "get_files() failed to exclude a subdir of an excluded directory")
filepaths = generator.get_files(paths=[''], filepaths = generator.get_files(
paths=[''],
exclude=[os.path.join('maindir', 'subdir')]) exclude=[os.path.join('maindir', 'subdir')])
found_files = {os.path.basename(f) for f in filepaths} found_files = {os.path.basename(f) for f in filepaths}
self.assertNotIn('subdir.md', found_files, self.assertNotIn(
'subdir.md', found_files,
"get_files() failed to exclude a subdirectory") "get_files() failed to exclude a subdirectory")
filepaths = generator.get_files(paths=[''], exclude=['subdir']) filepaths = generator.get_files(paths=[''], exclude=['subdir'])
found_files = {os.path.basename(f) for f in filepaths} found_files = {os.path.basename(f) for f in filepaths}
self.assertIn('subdir.md', found_files, self.assertIn(
'subdir.md', found_files,
"get_files() excluded a subdirectory by name, ignoring its path") "get_files() excluded a subdirectory by name, ignoring its path")
class TestArticlesGenerator(unittest.TestCase): class TestArticlesGenerator(unittest.TestCase):
@classmethod @classmethod
@ -96,7 +107,7 @@ class TestArticlesGenerator(unittest.TestCase):
settings['DEFAULT_CATEGORY'] = 'Default' settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1) settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['READERS'] = {'asc': None} settings['READERS'] = {'asc': None}
settings['CACHE_CONTENT'] = False # cache not needed for this logic tests settings['CACHE_CONTENT'] = False
cls.generator = ArticlesGenerator( cls.generator = ArticlesGenerator(
context=settings.copy(), settings=settings, context=settings.copy(), settings=settings,
@ -152,25 +163,30 @@ class TestArticlesGenerator(unittest.TestCase):
['Test mkd File', 'published', 'test', 'article'], ['Test mkd File', 'published', 'test', 'article'],
['This is a super article !', 'published', 'Yeah', 'article'], ['This is a super article !', 'published', 'Yeah', 'article'],
['This is a super article !', 'published', 'Yeah', 'article'], ['This is a super article !', 'published', 'Yeah', 'article'],
['Article with Nonconformant HTML meta tags', 'published', 'Default', 'article'], ['Article with Nonconformant HTML meta tags', 'published',
'Default', 'article'],
['This is a super article !', 'published', 'yeah', 'article'], ['This is a super article !', 'published', 'yeah', 'article'],
['This is a super article !', 'published', 'yeah', 'article'], ['This is a super article !', 'published', 'yeah', 'article'],
['This is a super article !', 'published', 'yeah', 'article'], ['This is a super article !', 'published', 'yeah', 'article'],
['This is a super article !', 'published', 'Default', 'article'], ['This is a super article !', 'published', 'Default', 'article'],
['This is an article with category !', 'published', 'yeah', ['This is an article with category !', 'published', 'yeah',
'article'], 'article'],
['This is an article with multiple authors!', 'published', 'Default', 'article'],
['This is an article with multiple authors!', 'published', 'Default', 'article'],
['This is an article with multiple authors in list format!', 'published', 'Default', 'article'],
['This is an article with multiple authors in lastname, firstname format!', 'published', 'Default', 'article'],
['This is an article with multiple authors!', 'published',
 'Default', 'article'],
['This is an article with multiple authors!', 'published',
 'Default', 'article'],
['This is an article with multiple authors in list format!',
 'published', 'Default', 'article'],
['This is an article with multiple authors in lastname, '
 'firstname format!', 'published', 'Default', 'article'],
['This is an article without category !', 'published', 'Default', ['This is an article without category !', 'published', 'Default',
'article'], 'article'],
['This is an article without category !', 'published', ['This is an article without category !', 'published',
'TestCategory', 'article'], 'TestCategory', 'article'],
['An Article With Code Block To Test Typogrify Ignore', ['An Article With Code Block To Test Typogrify Ignore',
'published', 'Default', 'article'], 'published', 'Default', 'article'],
['マックOS X 10.8でパイソンとVirtualenvをインストールと設定', 'published',
 '指導書', 'article'],
['マックOS X 10.8でパイソンとVirtualenvをインストールと設定',
 'published', '指導書', 'article'],
] ]
self.assertEqual(sorted(articles_expected), sorted(self.articles)) self.assertEqual(sorted(articles_expected), sorted(self.articles))
@ -292,7 +308,7 @@ class TestArticlesGenerator(unittest.TestCase):
generator.generate_period_archives(write) generator.generate_period_archives(write)
dates = [d for d in generator.dates if d.date.year == 1970] dates = [d for d in generator.dates if d.date.year == 1970]
self.assertEqual(len(dates), 1) self.assertEqual(len(dates), 1)
#among other things it must have at least been called with this # among other things it must have at least been called with this
settings["period"] = (1970,) settings["period"] = (1970,)
write.assert_called_with("posts/1970/index.html", write.assert_called_with("posts/1970/index.html",
generator.get_template("period_archives"), generator.get_template("period_archives"),
@ -300,37 +316,42 @@ class TestArticlesGenerator(unittest.TestCase):
blog=True, dates=dates) blog=True, dates=dates)
del settings["period"] del settings["period"]
settings['MONTH_ARCHIVE_SAVE_AS'] = 'posts/{date:%Y}/{date:%b}/index.html' settings['MONTH_ARCHIVE_SAVE_AS'] = \
'posts/{date:%Y}/{date:%b}/index.html'
generator = ArticlesGenerator( generator = ArticlesGenerator(
context=settings, settings=settings, context=settings, settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None) path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context() generator.generate_context()
write = MagicMock() write = MagicMock()
generator.generate_period_archives(write) generator.generate_period_archives(write)
dates = [d for d in generator.dates if d.date.year == 1970
         and d.date.month == 1]
dates = [d for d in generator.dates
         if d.date.year == 1970 and d.date.month == 1]
self.assertEqual(len(dates), 1) self.assertEqual(len(dates), 1)
settings["period"] = (1970, "January") settings["period"] = (1970, "January")
#among other things it must have at least been called with this # among other things it must have at least been called with this
write.assert_called_with("posts/1970/Jan/index.html", write.assert_called_with("posts/1970/Jan/index.html",
generator.get_template("period_archives"), generator.get_template("period_archives"),
settings, settings,
blog=True, dates=dates) blog=True, dates=dates)
del settings["period"] del settings["period"]
settings['DAY_ARCHIVE_SAVE_AS'] = 'posts/{date:%Y}/{date:%b}/{date:%d}/index.html' settings['DAY_ARCHIVE_SAVE_AS'] = \
'posts/{date:%Y}/{date:%b}/{date:%d}/index.html'
generator = ArticlesGenerator( generator = ArticlesGenerator(
context=settings, settings=settings, context=settings, settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None) path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context() generator.generate_context()
write = MagicMock() write = MagicMock()
generator.generate_period_archives(write) generator.generate_period_archives(write)
dates = [d for d in generator.dates if d.date.year == 1970
         and d.date.month == 1
         and d.date.day == 1]
dates = [
    d for d in generator.dates if
    d.date.year == 1970 and
    d.date.month == 1 and
    d.date.day == 1
]
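The rewritten comprehension above stacks one condition per line, with the trailing 'and' marking each line as a continuation. A runnable sketch of the same wrapping style, using fabricated sample dates:

    from datetime import date

    dates = [date(1970, 1, 1), date(1970, 2, 1), date(1971, 1, 1)]

    # One condition per line; the trailing 'and' signals continuation.
    hits = [
        d for d in dates if
        d.year == 1970 and
        d.month == 1 and
        d.day == 1
    ]
    print(len(hits))  # 1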
self.assertEqual(len(dates), 1) self.assertEqual(len(dates), 1)
settings["period"] = (1970, "January", 1) settings["period"] = (1970, "January", 1)
#among other things it must have at least been called with this # among other things it must have at least been called with this
write.assert_called_with("posts/1970/Jan/01/index.html", write.assert_called_with("posts/1970/Jan/01/index.html",
generator.get_template("period_archives"), generator.get_template("period_archives"),
settings, settings,
@ -347,11 +368,14 @@ class TestArticlesGenerator(unittest.TestCase):
def test_generate_authors(self): def test_generate_authors(self):
"""Check authors generation.""" """Check authors generation."""
authors = [author.name for author, _ in self.generator.authors] authors = [author.name for author, _ in self.generator.authors]
authors_expected = sorted(['Alexis Métaireau', 'Author, First', 'Author, Second', 'First Author', 'Second Author'])
authors_expected = sorted(
    ['Alexis Métaireau', 'Author, First', 'Author, Second',
     'First Author', 'Second Author'])
self.assertEqual(sorted(authors), authors_expected) self.assertEqual(sorted(authors), authors_expected)
# test for slug # test for slug
authors = [author.slug for author, _ in self.generator.authors] authors = [author.slug for author, _ in self.generator.authors]
authors_expected = ['alexis-metaireau', 'author-first', 'author-second', 'first-author', 'second-author']
authors_expected = ['alexis-metaireau', 'author-first',
                    'author-second', 'first-author', 'second-author']
self.assertEqual(sorted(authors), sorted(authors_expected)) self.assertEqual(sorted(authors), sorted(authors_expected))
def test_standard_metadata_in_default_metadata(self): def test_standard_metadata_in_default_metadata(self):
@ -391,7 +415,6 @@ class TestArticlesGenerator(unittest.TestCase):
settings = get_settings(filenames={}) settings = get_settings(filenames={})
settings['DEFAULT_CATEGORY'] = 'Default' settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1) settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['CACHE_CONTENT'] = False # cache not needed for this logic tests
settings['ARTICLE_ORDER_BY'] = 'title' settings['ARTICLE_ORDER_BY'] = 'title'
generator = ArticlesGenerator( generator = ArticlesGenerator(
@ -420,7 +443,8 @@ class TestArticlesGenerator(unittest.TestCase):
'This is a super article !', 'This is a super article !',
'This is a super article !', 'This is a super article !',
'This is an article with category !', 'This is an article with category !',
'This is an article with multiple authors in lastname, firstname format!', ('This is an article with multiple authors in lastname, '
'firstname format!'),
'This is an article with multiple authors in list format!', 'This is an article with multiple authors in list format!',
'This is an article with multiple authors!', 'This is an article with multiple authors!',
'This is an article with multiple authors!', 'This is an article with multiple authors!',
@ -435,7 +459,6 @@ class TestArticlesGenerator(unittest.TestCase):
settings = get_settings(filenames={}) settings = get_settings(filenames={})
settings['DEFAULT_CATEGORY'] = 'Default' settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1) settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['CACHE_CONTENT'] = False # cache not needed for this logic tests
settings['ARTICLE_ORDER_BY'] = 'reversed-title' settings['ARTICLE_ORDER_BY'] = 'reversed-title'
generator = ArticlesGenerator( generator = ArticlesGenerator(
@ -561,7 +584,7 @@ class TestPageGenerator(unittest.TestCase):
are generated correctly on pages are generated correctly on pages
""" """
settings = get_settings(filenames={}) settings = get_settings(filenames={})
settings['PAGE_PATHS'] = ['TestPages'] # relative to CUR_DIR settings['PAGE_PATHS'] = ['TestPages'] # relative to CUR_DIR
settings['CACHE_PATH'] = self.temp_cache settings['CACHE_PATH'] = self.temp_cache
settings['DEFAULT_DATE'] = (1970, 1, 1) settings['DEFAULT_DATE'] = (1970, 1, 1)
@ -586,7 +609,6 @@ class TestTemplatePagesGenerator(unittest.TestCase):
self.old_locale = locale.setlocale(locale.LC_ALL) self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, str('C')) locale.setlocale(locale.LC_ALL, str('C'))
def tearDown(self): def tearDown(self):
rmtree(self.temp_content) rmtree(self.temp_content)
rmtree(self.temp_output) rmtree(self.temp_output)
@ -632,59 +654,67 @@ class TestStaticGenerator(unittest.TestCase):
def test_static_excludes(self): def test_static_excludes(self):
"""Test that StaticGenerator respects STATIC_EXCLUDES. """Test that StaticGenerator respects STATIC_EXCLUDES.
""" """
settings = get_settings(STATIC_EXCLUDES=['subdir'],
                        PATH=self.content_path, STATIC_PATHS=[''])
settings = get_settings(
    STATIC_EXCLUDES=['subdir'],
    PATH=self.content_path,
    STATIC_PATHS=[''],
    filenames={})
context = settings.copy() context = settings.copy()
context['filenames'] = {}
StaticGenerator(context=context, settings=settings, StaticGenerator(
context=context, settings=settings,
path=settings['PATH'], output_path=None, path=settings['PATH'], output_path=None,
theme=settings['THEME']).generate_context() theme=settings['THEME']).generate_context()
staticnames = [os.path.basename(c.source_path) staticnames = [os.path.basename(c.source_path)
for c in context['staticfiles']] for c in context['staticfiles']]
self.assertNotIn('subdir_fake_image.jpg', staticnames, self.assertNotIn(
'subdir_fake_image.jpg', staticnames,
"StaticGenerator processed a file in a STATIC_EXCLUDES directory") "StaticGenerator processed a file in a STATIC_EXCLUDES directory")
self.assertIn('fake_image.jpg', staticnames, self.assertIn(
'fake_image.jpg', staticnames,
"StaticGenerator skipped a file that it should have included") "StaticGenerator skipped a file that it should have included")
def test_static_exclude_sources(self): def test_static_exclude_sources(self):
"""Test that StaticGenerator respects STATIC_EXCLUDE_SOURCES. """Test that StaticGenerator respects STATIC_EXCLUDE_SOURCES.
""" """
# Test STATIC_EXCLUDE_SOURCES=True
settings = get_settings(STATIC_EXCLUDE_SOURCES=True,
                        PATH=self.content_path, PAGE_PATHS=[''],
                        STATIC_PATHS=[''], CACHE_CONTENT=False)
settings = get_settings(
    STATIC_EXCLUDE_SOURCES=True,
    PATH=self.content_path,
    PAGE_PATHS=[''],
    STATIC_PATHS=[''],
    CACHE_CONTENT=False,
    filenames={})
context = settings.copy() context = settings.copy()
context['filenames'] = {}
for generator_class in (PagesGenerator, StaticGenerator): for generator_class in (PagesGenerator, StaticGenerator):
generator_class(context=context, settings=settings, generator_class(
context=context, settings=settings,
path=settings['PATH'], output_path=None, path=settings['PATH'], output_path=None,
theme=settings['THEME']).generate_context() theme=settings['THEME']).generate_context()
staticnames = [os.path.basename(c.source_path) staticnames = [os.path.basename(c.source_path)
for c in context['staticfiles']] for c in context['staticfiles']]
self.assertFalse(any(name.endswith(".md") for name in staticnames), self.assertFalse(
any(name.endswith(".md") for name in staticnames),
"STATIC_EXCLUDE_SOURCES=True failed to exclude a markdown file") "STATIC_EXCLUDE_SOURCES=True failed to exclude a markdown file")
# Test STATIC_EXCLUDE_SOURCES=False
settings.update(STATIC_EXCLUDE_SOURCES=False) settings.update(STATIC_EXCLUDE_SOURCES=False)
context = settings.copy() context = settings.copy()
context['filenames'] = {} context['filenames'] = {}
for generator_class in (PagesGenerator, StaticGenerator): for generator_class in (PagesGenerator, StaticGenerator):
generator_class(context=context, settings=settings, generator_class(
context=context, settings=settings,
path=settings['PATH'], output_path=None, path=settings['PATH'], output_path=None,
theme=settings['THEME']).generate_context() theme=settings['THEME']).generate_context()
staticnames = [os.path.basename(c.source_path) staticnames = [os.path.basename(c.source_path)
for c in context['staticfiles']] for c in context['staticfiles']]
self.assertTrue(any(name.endswith(".md") for name in staticnames), self.assertTrue(
any(name.endswith(".md") for name in staticnames),
"STATIC_EXCLUDE_SOURCES=False failed to include a markdown file") "STATIC_EXCLUDE_SOURCES=False failed to include a markdown file")
View file
@ -1,16 +1,19 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function from __future__ import print_function, unicode_literals
import locale
import os import os
import re import re
import locale
from codecs import open from codecs import open
from pelican.tools.pelican_import import wp2fields, fields2pelican, decode_wp_content, build_header, build_markdown_header, get_attachments, download_attachments
from pelican.tests.support import (unittest, temporary_folder, mute,
                                   skipIfNoExecutable)
from pelican.utils import slugify, path_to_file_url
from pelican.tests.support import (mute, skipIfNoExecutable, temporary_folder,
                                   unittest)
from pelican.tools.pelican_import import (build_header, build_markdown_header,
                                          decode_wp_content,
                                          download_attachments, fields2pelican,
                                          get_attachments, wp2fields)
from pelican.utils import path_to_file_url, slugify
CUR_DIR = os.path.abspath(os.path.dirname(__file__)) CUR_DIR = os.path.abspath(os.path.dirname(__file__))
WORDPRESS_XML_SAMPLE = os.path.join(CUR_DIR, 'content', 'wordpressexport.xml') WORDPRESS_XML_SAMPLE = os.path.join(CUR_DIR, 'content', 'wordpressexport.xml')
@ -32,7 +35,6 @@ except ImportError:
LXML = False LXML = False
@skipIfNoExecutable(['pandoc', '--version']) @skipIfNoExecutable(['pandoc', '--version'])
@unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module') @unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module')
class TestWordpressXmlImporter(unittest.TestCase): class TestWordpressXmlImporter(unittest.TestCase):
@ -48,17 +50,19 @@ class TestWordpressXmlImporter(unittest.TestCase):
def test_ignore_empty_posts(self): def test_ignore_empty_posts(self):
self.assertTrue(self.posts) self.assertTrue(self.posts)
for title, content, fname, date, author, categ, tags, status, kind, format in self.posts:
    self.assertTrue(title.strip())
for (title, content, fname, date, author,
     categ, tags, status, kind, format) in self.posts:
    self.assertTrue(title.strip())
def test_recognise_page_kind(self): def test_recognise_page_kind(self):
""" Check that we recognise pages in wordpress, as opposed to posts """ """ Check that we recognise pages in wordpress, as opposed to posts """
self.assertTrue(self.posts) self.assertTrue(self.posts)
# Collect (title, filename, kind) of non-empty posts recognised as page # Collect (title, filename, kind) of non-empty posts recognised as page
pages_data = [] pages_data = []
for title, content, fname, date, author, categ, tags, status, kind, format in self.posts:
    if kind == 'page':
        pages_data.append((title, fname))
for (title, content, fname, date, author,
     categ, tags, status, kind, format) in self.posts:
    if kind == 'page':
        pages_data.append((title, fname))
self.assertEqual(2, len(pages_data)) self.assertEqual(2, len(pages_data))
self.assertEqual(('Page', 'contact'), pages_data[0]) self.assertEqual(('Page', 'contact'), pages_data[0])
self.assertEqual(('Empty Page', 'empty'), pages_data[1]) self.assertEqual(('Empty Page', 'empty'), pages_data[1])
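The loops in this file unpack ten fields per post; wrapping the target list in parentheses lets the unpacking span two lines without a backslash. A self-contained sketch with one fabricated post tuple ('fmt' is used in place of the original's 'format', which shadows a builtin):

    posts = [
        ('Page', 'body text', 'contact', '2015-06-16', 'author',
         ['category'], ['tag'], 'published', 'page', 'markdown'),
    ]

    # Parentheses allow the ten-name unpacking to wrap cleanly.
    for (title, content, fname, date, author,
         categ, tags, status, kind, fmt) in posts:
        if kind == 'page':
            print(title, fname)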
@ -67,7 +71,8 @@ class TestWordpressXmlImporter(unittest.TestCase):
silent_f2p = mute(True)(fields2pelican) silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Empty Page"), self.posts) test_post = filter(lambda p: p[0].startswith("Empty Page"), self.posts)
with temporary_folder() as temp: with temporary_folder() as temp:
fname = list(silent_f2p(test_post, 'markdown', temp, dirpage=True))[0] fname = list(silent_f2p(test_post, 'markdown',
temp, dirpage=True))[0]
self.assertTrue(fname.endswith('pages%sempty.md' % os.path.sep)) self.assertTrue(fname.endswith('pages%sempty.md' % os.path.sep))
def test_dircat(self): def test_dircat(self):
@ -75,10 +80,11 @@ class TestWordpressXmlImporter(unittest.TestCase):
test_posts = [] test_posts = []
for post in self.posts: for post in self.posts:
# check post kind # check post kind
if len(post[5]) > 0: # Has a category if len(post[5]) > 0: # Has a category
test_posts.append(post) test_posts.append(post)
with temporary_folder() as temp: with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, 'markdown', temp, dircat=True)) fnames = list(silent_f2p(test_posts, 'markdown',
temp, dircat=True))
index = 0 index = 0
for post in test_posts: for post in test_posts:
name = post[2] name = post[2]
@ -92,25 +98,33 @@ class TestWordpressXmlImporter(unittest.TestCase):
def test_unless_custom_post_all_items_should_be_pages_or_posts(self): def test_unless_custom_post_all_items_should_be_pages_or_posts(self):
self.assertTrue(self.posts) self.assertTrue(self.posts)
pages_data = [] pages_data = []
for title, content, fname, date, author, categ, tags, status, kind, format in self.posts:
    if kind == 'page' or kind == 'article':
        pass
    else:
        pages_data.append((title, fname))
for (title, content, fname, date, author, categ,
     tags, status, kind, format) in self.posts:
    if kind == 'page' or kind == 'article':
        pass
    else:
        pages_data.append((title, fname))
self.assertEqual(0, len(pages_data)) self.assertEqual(0, len(pages_data))
def test_recognise_custom_post_type(self): def test_recognise_custom_post_type(self):
self.assertTrue(self.custposts) self.assertTrue(self.custposts)
cust_data = [] cust_data = []
for title, content, fname, date, author, categ, tags, status, kind, format in self.custposts:
    if kind == 'article' or kind == 'page':
        pass
    else:
        cust_data.append((title, kind))
for (title, content, fname, date, author, categ,
     tags, status, kind, format) in self.custposts:
    if kind == 'article' or kind == 'page':
        pass
    else:
        cust_data.append((title, kind))
self.assertEqual(3, len(cust_data)) self.assertEqual(3, len(cust_data))
self.assertEqual(('A custom post in category 4', 'custom1'), cust_data[0])
self.assertEqual(('A custom post in category 5', 'custom1'), cust_data[1])
self.assertEqual(('A 2nd custom post type also in category 5', 'custom2'), cust_data[2])
self.assertEqual(
    ('A custom post in category 4', 'custom1'),
    cust_data[0])
self.assertEqual(
    ('A custom post in category 5', 'custom1'),
    cust_data[1])
self.assertEqual(
    ('A 2nd custom post type also in category 5', 'custom2'),
    cust_data[2])
def test_custom_posts_put_in_own_dir(self): def test_custom_posts_put_in_own_dir(self):
silent_f2p = mute(True)(fields2pelican) silent_f2p = mute(True)(fields2pelican)
@ -122,7 +136,8 @@ class TestWordpressXmlImporter(unittest.TestCase):
else: else:
test_posts.append(post) test_posts.append(post)
with temporary_folder() as temp: with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, 'markdown', temp, wp_custpost = True)) fnames = list(silent_f2p(test_posts, 'markdown',
temp, wp_custpost=True))
index = 0 index = 0
for post in test_posts: for post in test_posts:
name = post[2] name = post[2]
@ -144,7 +159,7 @@ class TestWordpressXmlImporter(unittest.TestCase):
test_posts.append(post) test_posts.append(post)
with temporary_folder() as temp: with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, 'markdown', temp, fnames = list(silent_f2p(test_posts, 'markdown', temp,
wp_custpost=True, dircat=True)) wp_custpost=True, dircat=True))
index = 0 index = 0
for post in test_posts: for post in test_posts:
name = post[2] name = post[2]
@ -157,7 +172,7 @@ class TestWordpressXmlImporter(unittest.TestCase):
index += 1 index += 1
def test_wp_custpost_true_dirpage_false(self): def test_wp_custpost_true_dirpage_false(self):
#pages should only be put in their own directory when dirpage = True # pages should only be put in their own directory when dirpage = True
silent_f2p = mute(True)(fields2pelican) silent_f2p = mute(True)(fields2pelican)
test_posts = [] test_posts = []
for post in self.custposts: for post in self.custposts:
@ -166,7 +181,7 @@ class TestWordpressXmlImporter(unittest.TestCase):
test_posts.append(post) test_posts.append(post)
with temporary_folder() as temp: with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, 'markdown', temp, fnames = list(silent_f2p(test_posts, 'markdown', temp,
wp_custpost=True, dirpage=False)) wp_custpost=True, dirpage=False))
index = 0 index = 0
for post in test_posts: for post in test_posts:
name = post[2] name = post[2]
@ -175,7 +190,6 @@ class TestWordpressXmlImporter(unittest.TestCase):
out_name = fnames[index] out_name = fnames[index]
self.assertFalse(out_name.endswith(filename)) self.assertFalse(out_name.endswith(filename))
def test_can_toggle_raw_html_code_parsing(self): def test_can_toggle_raw_html_code_parsing(self):
def r(f): def r(f):
with open(f, encoding='utf-8') as infile: with open(f, encoding='utf-8') as infile:
@ -184,10 +198,12 @@ class TestWordpressXmlImporter(unittest.TestCase):
with temporary_folder() as temp: with temporary_folder() as temp:
rst_files = (r(f) for f in silent_f2p(self.posts, 'markdown', temp))
rst_files = (r(f) for f
             in silent_f2p(self.posts, 'markdown', temp))
self.assertTrue(any('<iframe' in rst for rst in rst_files)) self.assertTrue(any('<iframe' in rst for rst in rst_files))
rst_files = (r(f) for f in silent_f2p(self.posts, 'markdown', temp,
                                      strip_raw=True))
rst_files = (r(f) for f
             in silent_f2p(self.posts, 'markdown',
                           temp, strip_raw=True))
self.assertFalse(any('<iframe' in rst for rst in rst_files)) self.assertFalse(any('<iframe' in rst for rst in rst_files))
# no effect in rst # no effect in rst
rst_files = (r(f) for f in silent_f2p(self.posts, 'rst', temp)) rst_files = (r(f) for f in silent_f2p(self.posts, 'rst', temp))
@ -197,13 +213,14 @@ class TestWordpressXmlImporter(unittest.TestCase):
self.assertFalse(any('<iframe' in rst for rst in rst_files)) self.assertFalse(any('<iframe' in rst for rst in rst_files))
def test_decode_html_entities_in_titles(self): def test_decode_html_entities_in_titles(self):
test_posts = [post for post in self.posts if post[2] == 'html-entity-test']
test_posts = [post for post
              in self.posts if post[2] == 'html-entity-test']
self.assertEqual(len(test_posts), 1) self.assertEqual(len(test_posts), 1)
post = test_posts[0] post = test_posts[0]
title = post[0] title = post[0]
self.assertTrue(title, "A normal post with some <html> entities in the"
                " title. You can't miss them.")
self.assertTrue(title, "A normal post with some <html> entities in "
                "the title. You can't miss them.")
self.assertNotIn('&', title) self.assertNotIn('&', title)
def test_decode_wp_content_returns_empty(self): def test_decode_wp_content_returns_empty(self):
@ -216,14 +233,18 @@ class TestWordpressXmlImporter(unittest.TestCase):
encoded_content = encoded_file.read() encoded_content = encoded_file.read()
with open(WORDPRESS_DECODED_CONTENT_SAMPLE, 'r') as decoded_file: with open(WORDPRESS_DECODED_CONTENT_SAMPLE, 'r') as decoded_file:
decoded_content = decoded_file.read() decoded_content = decoded_file.read()
self.assertEqual(decode_wp_content(encoded_content, br=False), decoded_content) self.assertEqual(
decode_wp_content(encoded_content, br=False),
decoded_content)
def test_preserve_verbatim_formatting(self): def test_preserve_verbatim_formatting(self):
def r(f): def r(f):
with open(f, encoding='utf-8') as infile: with open(f, encoding='utf-8') as infile:
return infile.read() return infile.read()
silent_f2p = mute(True)(fields2pelican) silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Code in List"), self.posts) test_post = filter(
lambda p: p[0].startswith("Code in List"),
self.posts)
with temporary_folder() as temp: with temporary_folder() as temp:
md = [r(f) for f in silent_f2p(test_post, 'markdown', temp)][0] md = [r(f) for f in silent_f2p(test_post, 'markdown', temp)][0]
self.assertTrue(re.search(r'\s+a = \[1, 2, 3\]', md)) self.assertTrue(re.search(r'\s+a = \[1, 2, 3\]', md))
@ -231,14 +252,17 @@ class TestWordpressXmlImporter(unittest.TestCase):
for_line = re.search(r'\s+for i in zip\(a, b\):', md).group(0) for_line = re.search(r'\s+for i in zip\(a, b\):', md).group(0)
print_line = re.search(r'\s+print i', md).group(0) print_line = re.search(r'\s+print i', md).group(0)
self.assertTrue(for_line.rindex('for') < print_line.rindex('print')) self.assertTrue(
for_line.rindex('for') < print_line.rindex('print'))
def test_code_in_list(self): def test_code_in_list(self):
def r(f): def r(f):
with open(f, encoding='utf-8') as infile: with open(f, encoding='utf-8') as infile:
return infile.read() return infile.read()
silent_f2p = mute(True)(fields2pelican) silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Code in List"), self.posts) test_post = filter(
lambda p: p[0].startswith("Code in List"),
self.posts)
with temporary_folder() as temp: with temporary_folder() as temp:
md = [r(f) for f in silent_f2p(test_post, 'markdown', temp)][0] md = [r(f) for f in silent_f2p(test_post, 'markdown', temp)][0]
sample_line = re.search(r'- This is a code sample', md).group(0) sample_line = re.search(r'- This is a code sample', md).group(0)
@ -285,26 +309,29 @@ class TestBuildHeader(unittest.TestCase):
self.assertEqual(build_header(*header_data), expected_docutils) self.assertEqual(build_header(*header_data), expected_docutils)
self.assertEqual(build_markdown_header(*header_data), expected_md) self.assertEqual(build_markdown_header(*header_data), expected_md)
def test_build_header_with_east_asian_characters(self): def test_build_header_with_east_asian_characters(self):
header = build_header('これは広い幅の文字だけで構成されたタイトルです', header = build_header('これは広い幅の文字だけで構成されたタイトルです',
None, None, None, None, None) None, None, None, None, None)
self.assertEqual(header,
                 'これは広い幅の文字だけで構成されたタイトルです\n' +
                 '##############################################\n\n')
self.assertEqual(header,
                 ('これは広い幅の文字だけで構成されたタイトルです\n'
                  '##############################################'
                  '\n\n'))
def test_galleries_added_to_header(self): def test_galleries_added_to_header(self):
header = build_header('test', None, None, None, None,
                      None, attachments=['output/test1', 'output/test2'])
self.assertEqual(header, 'test\n####\n' + ':attachments: output/test1, '
                 + 'output/test2\n\n')
header = build_header('test', None, None, None, None, None,
                      attachments=['output/test1', 'output/test2'])
self.assertEqual(header, ('test\n####\n'
                          ':attachments: output/test1, '
                          'output/test2\n\n'))
def test_galleries_added_to_markdown_header(self): def test_galleries_added_to_markdown_header(self):
header = build_markdown_header('test', None, None, None, None, None,
                               attachments=['output/test1', 'output/test2'])
self.assertEqual(header, 'Title: test\n' + 'Attachments: output/test1, '
                 + 'output/test2\n\n')
header = build_markdown_header('test', None, None, None, None, None,
                               attachments=['output/test1',
                                            'output/test2'])
self.assertEqual(
    header,
    'Title: test\nAttachments: output/test1, output/test2\n\n')
@unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module') @unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module')
@ -326,14 +353,24 @@ class TestWordpressXMLAttachements(unittest.TestCase):
self.assertTrue(self.attachments) self.assertTrue(self.attachments)
for post in self.attachments.keys(): for post in self.attachments.keys():
if post is None: if post is None:
self.assertTrue(self.attachments[post][0] == 'https://upload.wikimedia.org/wikipedia/commons/thumb/2/2c/Pelican_lakes_entrance02.jpg/240px-Pelican_lakes_entrance02.jpg')
expected = ('https://upload.wikimedia.org/wikipedia/commons/'
            'thumb/2/2c/Pelican_lakes_entrance02.jpg/'
            '240px-Pelican_lakes_entrance02.jpg')
self.assertEqual(self.attachments[post][0], expected)
elif post == 'with-excerpt': elif post == 'with-excerpt':
self.assertTrue(self.attachments[post][0] == 'http://thisurlisinvalid.notarealdomain/not_an_image.jpg')
self.assertTrue(self.attachments[post][1] == 'http://en.wikipedia.org/wiki/File:Pelikan_Walvis_Bay.jpg')
expected_invalid = ('http://thisurlisinvalid.notarealdomain/'
                    'not_an_image.jpg')
expected_pelikan = ('http://en.wikipedia.org/wiki/'
                    'File:Pelikan_Walvis_Bay.jpg')
self.assertEqual(self.attachments[post][0], expected_invalid)
self.assertEqual(self.attachments[post][1], expected_pelikan)
elif post == 'with-tags': elif post == 'with-tags':
self.assertTrue(self.attachments[post][0] == 'http://thisurlisinvalid.notarealdomain')
expected_invalid = ('http://thisurlisinvalid.notarealdomain')
self.assertEqual(self.attachments[post][0], expected_invalid)
else: else:
self.fail('all attachments should match to a filename or None, {}'.format(post))
self.fail('all attachments should match to a '
          'filename or None, {}'
          .format(post))
def test_download_attachments(self): def test_download_attachments(self):
real_file = os.path.join(CUR_DIR, 'content/article.rst') real_file = os.path.join(CUR_DIR, 'content/article.rst')
@ -344,4 +381,6 @@ class TestWordpressXMLAttachements(unittest.TestCase):
locations = list(silent_da(temp, [good_url, bad_url])) locations = list(silent_da(temp, [good_url, bad_url]))
self.assertEqual(1, len(locations)) self.assertEqual(1, len(locations))
directory = locations[0] directory = locations[0]
self.assertTrue(directory.endswith(os.path.join('content', 'article.rst')), directory)
self.assertTrue(
    directory.endswith(os.path.join('content', 'article.rst')),
    directory)
View file
@ -1,18 +1,21 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import from __future__ import absolute_import, unicode_literals
import locale import locale
from pelican.tests.support import unittest, get_settings
from pelican.paginator import Paginator
from pelican.contents import Article, Author
from pelican.settings import DEFAULT_CONFIG
from jinja2.utils import generate_lorem_ipsum from jinja2.utils import generate_lorem_ipsum
from pelican.contents import Article, Author
from pelican.paginator import Paginator
from pelican.settings import DEFAULT_CONFIG
from pelican.tests.support import get_settings, unittest
# generate one paragraph, enclosed with <p> # generate one paragraph, enclosed with <p>
TEST_CONTENT = str(generate_lorem_ipsum(n=1)) TEST_CONTENT = str(generate_lorem_ipsum(n=1))
TEST_SUMMARY = generate_lorem_ipsum(n=1, html=False) TEST_SUMMARY = generate_lorem_ipsum(n=1, html=False)
class TestPage(unittest.TestCase): class TestPage(unittest.TestCase):
def setUp(self): def setUp(self):
super(TestPage, self).setUp() super(TestPage, self).setUp()
@ -49,7 +52,8 @@ class TestPage(unittest.TestCase):
) )
self.page_kwargs['metadata']['author'] = Author('Blogger', settings) self.page_kwargs['metadata']['author'] = Author('Blogger', settings)
object_list = [Article(**self.page_kwargs), Article(**self.page_kwargs)] object_list = [Article(**self.page_kwargs),
Article(**self.page_kwargs)]
paginator = Paginator('foobar.foo', object_list, settings) paginator = Paginator('foobar.foo', object_list, settings)
page = paginator.page(1) page = paginator.page(1)
self.assertEqual(page.save_as, 'foobar.foo') self.assertEqual(page.save_as, 'foobar.foo')
View file
@ -1,23 +1,25 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function from __future__ import print_function, unicode_literals
import collections import collections
import os
import sys
from tempfile import mkdtemp
from shutil import rmtree
import locale import locale
import logging import logging
import os
import subprocess import subprocess
import sys
from shutil import rmtree
from tempfile import mkdtemp
from pelican import Pelican from pelican import Pelican
from pelican.generators import StaticGenerator from pelican.generators import StaticGenerator
from pelican.settings import read_settings from pelican.settings import read_settings
from pelican.tests.support import LoggedTestCase, mute, locale_available, unittest
from pelican.tests.support import (LoggedTestCase, locale_available,
                                   mute, unittest)
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
SAMPLES_PATH = os.path.abspath(os.path.join( SAMPLES_PATH = os.path.abspath(os.path.join(
CURRENT_DIR, os.pardir, os.pardir, 'samples')) CURRENT_DIR, os.pardir, os.pardir, 'samples'))
OUTPUT_PATH = os.path.abspath(os.path.join(CURRENT_DIR, 'output')) OUTPUT_PATH = os.path.abspath(os.path.join(CURRENT_DIR, 'output'))
INPUT_PATH = os.path.join(SAMPLES_PATH, "content") INPUT_PATH = os.path.join(SAMPLES_PATH, "content")
@ -27,13 +29,10 @@ SAMPLE_FR_CONFIG = os.path.join(SAMPLES_PATH, "pelican.conf_FR.py")
def recursiveDiff(dcmp): def recursiveDiff(dcmp):
diff = { diff = {
'diff_files': [os.path.join(dcmp.right, f) 'diff_files': [os.path.join(dcmp.right, f) for f in dcmp.diff_files],
for f in dcmp.diff_files], 'left_only': [os.path.join(dcmp.right, f) for f in dcmp.left_only],
'left_only': [os.path.join(dcmp.right, f) 'right_only': [os.path.join(dcmp.right, f) for f in dcmp.right_only],
for f in dcmp.left_only], }
'right_only': [os.path.join(dcmp.right, f)
for f in dcmp.right_only],
}
for sub_dcmp in dcmp.subdirs.values(): for sub_dcmp in dcmp.subdirs.values():
for k, v in recursiveDiff(sub_dcmp).items(): for k, v in recursiveDiff(sub_dcmp).items():
diff[k] += v diff[k] += v
@ -60,9 +59,13 @@ class TestPelican(LoggedTestCase):
def assertDirsEqual(self, left_path, right_path): def assertDirsEqual(self, left_path, right_path):
out, err = subprocess.Popen( out, err = subprocess.Popen(
['git', 'diff', '--no-ext-diff', '--exit-code', '-w', left_path, right_path], ['git', 'diff', '--no-ext-diff', '--exit-code',
env={str('PAGER'): str('')}, stdout=subprocess.PIPE, stderr=subprocess.PIPE '-w', left_path, right_path],
env={str('PAGER'): str('')},
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate() ).communicate()
def ignorable_git_crlf_errors(line): def ignorable_git_crlf_errors(line):
# Work around for running tests on Windows # Work around for running tests on Windows
for msg in [ for msg in [
@ -85,9 +88,11 @@ class TestPelican(LoggedTestCase):
pelican = Pelican(settings=read_settings(path=None)) pelican = Pelican(settings=read_settings(path=None))
generator_classes = pelican.get_generator_classes() generator_classes = pelican.get_generator_classes()
self.assertTrue(generator_classes[-1] is StaticGenerator, self.assertTrue(
generator_classes[-1] is StaticGenerator,
"StaticGenerator must be the last generator, but it isn't!") "StaticGenerator must be the last generator, but it isn't!")
self.assertIsInstance(generator_classes, collections.Sequence, self.assertIsInstance(
generator_classes, collections.Sequence,
"get_generator_classes() must return a Sequence to preserve order") "get_generator_classes() must return a Sequence to preserve order")
def test_basic_generation_works(self): def test_basic_generation_works(self):
@ -98,10 +103,11 @@ class TestPelican(LoggedTestCase):
'OUTPUT_PATH': self.temp_path, 'OUTPUT_PATH': self.temp_path,
'CACHE_PATH': self.temp_cache, 'CACHE_PATH': self.temp_cache,
'LOCALE': locale.normalize('en_US'), 'LOCALE': locale.normalize('en_US'),
}) })
pelican = Pelican(settings=settings) pelican = Pelican(settings=settings)
mute(True)(pelican.run)() mute(True)(pelican.run)()
self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, 'basic')) self.assertDirsEqual(
self.temp_path, os.path.join(OUTPUT_PATH, 'basic'))
self.assertLogCountEqual( self.assertLogCountEqual(
count=3, count=3,
msg="Unable to find.*skipping url replacement", msg="Unable to find.*skipping url replacement",
@ -114,10 +120,11 @@ class TestPelican(LoggedTestCase):
'OUTPUT_PATH': self.temp_path, 'OUTPUT_PATH': self.temp_path,
'CACHE_PATH': self.temp_cache, 'CACHE_PATH': self.temp_cache,
'LOCALE': locale.normalize('en_US'), 'LOCALE': locale.normalize('en_US'),
}) })
pelican = Pelican(settings=settings) pelican = Pelican(settings=settings)
mute(True)(pelican.run)() mute(True)(pelican.run)()
self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, 'custom')) self.assertDirsEqual(
self.temp_path, os.path.join(OUTPUT_PATH, 'custom'))
@unittest.skipUnless(locale_available('fr_FR.UTF-8') or @unittest.skipUnless(locale_available('fr_FR.UTF-8') or
locale_available('French'), 'French locale needed') locale_available('French'), 'French locale needed')
@ -133,10 +140,11 @@ class TestPelican(LoggedTestCase):
'OUTPUT_PATH': self.temp_path, 'OUTPUT_PATH': self.temp_path,
'CACHE_PATH': self.temp_cache, 'CACHE_PATH': self.temp_cache,
'LOCALE': our_locale, 'LOCALE': our_locale,
}) })
pelican = Pelican(settings=settings) pelican = Pelican(settings=settings)
mute(True)(pelican.run)() mute(True)(pelican.run)()
self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, 'custom_locale')) self.assertDirsEqual(
self.temp_path, os.path.join(OUTPUT_PATH, 'custom_locale'))
def test_theme_static_paths_copy(self): def test_theme_static_paths_copy(self):
# the same thing with a specified set of settings should work # the same thing with a specified set of settings should work
@ -146,8 +154,9 @@ class TestPelican(LoggedTestCase):
'CACHE_PATH': self.temp_cache, 'CACHE_PATH': self.temp_cache,
'THEME_STATIC_PATHS': [os.path.join(SAMPLES_PATH, 'very'), 'THEME_STATIC_PATHS': [os.path.join(SAMPLES_PATH, 'very'),
os.path.join(SAMPLES_PATH, 'kinda'), os.path.join(SAMPLES_PATH, 'kinda'),
os.path.join(SAMPLES_PATH, 'theme_standard')] os.path.join(SAMPLES_PATH,
}) 'theme_standard')]
})
pelican = Pelican(settings=settings) pelican = Pelican(settings=settings)
mute(True)(pelican.run)() mute(True)(pelican.run)()
theme_output = os.path.join(self.temp_path, 'theme') theme_output = os.path.join(self.temp_path, 'theme')
@ -165,8 +174,9 @@ class TestPelican(LoggedTestCase):
'PATH': INPUT_PATH, 'PATH': INPUT_PATH,
'OUTPUT_PATH': self.temp_path, 'OUTPUT_PATH': self.temp_path,
'CACHE_PATH': self.temp_cache, 'CACHE_PATH': self.temp_cache,
'THEME_STATIC_PATHS': [os.path.join(SAMPLES_PATH, 'theme_standard')] 'THEME_STATIC_PATHS': [os.path.join(SAMPLES_PATH,
}) 'theme_standard')]
})
pelican = Pelican(settings=settings) pelican = Pelican(settings=settings)
mute(True)(pelican.run)() mute(True)(pelican.run)()
@ -184,9 +194,9 @@ class TestPelican(LoggedTestCase):
'WRITE_SELECTED': [ 'WRITE_SELECTED': [
os.path.join(self.temp_path, 'oh-yeah.html'), os.path.join(self.temp_path, 'oh-yeah.html'),
os.path.join(self.temp_path, 'categories.html'), os.path.join(self.temp_path, 'categories.html'),
], ],
'LOCALE': locale.normalize('en_US'), 'LOCALE': locale.normalize('en_US'),
}) })
pelican = Pelican(settings=settings) pelican = Pelican(settings=settings)
logger = logging.getLogger() logger = logging.getLogger()
orig_level = logger.getEffectiveLevel() orig_level = logger.getEffectiveLevel()
View file
@ -1,11 +1,12 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function from __future__ import print_function, unicode_literals
import os import os
from pelican import readers from pelican import readers
from pelican.tests.support import get_settings, unittest
from pelican.utils import SafeDatetime from pelican.utils import SafeDatetime
from pelican.tests.support import unittest, get_settings
CUR_DIR = os.path.dirname(__file__) CUR_DIR = os.path.dirname(__file__)
CONTENT_PATH = os.path.join(CUR_DIR, 'content') CONTENT_PATH = os.path.join(CUR_DIR, 'content')
@ -29,22 +30,26 @@ class ReaderTest(unittest.TestCase):
self.assertEqual( self.assertEqual(
value, value,
real_value, real_value,
'Expected %s to have value %s, but was %s' % (key, value, real_value)) 'Expected %s to have value %s, but was %s' %
(key, value, real_value))
else: else:
self.fail( self.fail(
'Expected %s to have value %s, but was not in Dict' % (key, value)) 'Expected %s to have value %s, but was not in Dict' %
(key, value))
class TestAssertDictHasSubset(ReaderTest): class TestAssertDictHasSubset(ReaderTest):
def setUp(self): def setUp(self):
self.dictionary = { self.dictionary = {
'key-a' : 'val-a', 'key-a': 'val-a',
'key-b' : 'val-b'} 'key-b': 'val-b'
}
def tearDown(self): def tearDown(self):
self.dictionary = None self.dictionary = None
def test_subset(self): def test_subset(self):
self.assertDictHasSubset(self.dictionary, {'key-a':'val-a'}) self.assertDictHasSubset(self.dictionary, {'key-a': 'val-a'})
def test_equal(self): def test_equal(self):
self.assertDictHasSubset(self.dictionary, self.dictionary) self.assertDictHasSubset(self.dictionary, self.dictionary)
@ -54,18 +59,17 @@ class TestAssertDictHasSubset(ReaderTest):
AssertionError, AssertionError,
'Expected.*key-c.*to have value.*val-c.*but was not in Dict', 'Expected.*key-c.*to have value.*val-c.*but was not in Dict',
self.assertDictHasSubset, self.assertDictHasSubset,
self.dictionary, self.dictionary,
{'key-c':'val-c'} {'key-c': 'val-c'})
)
def test_fail_wrong_val(self): def test_fail_wrong_val(self):
self.assertRaisesRegexp( self.assertRaisesRegexp(
AssertionError, AssertionError,
'Expected .*key-a.* to have value .*val-b.* but was .*val-a.*', 'Expected .*key-a.* to have value .*val-b.* but was .*val-a.*',
self.assertDictHasSubset, self.assertDictHasSubset,
self.dictionary, self.dictionary,
{'key-a':'val-b'} {'key-a': 'val-b'})
)
class DefaultReaderTest(ReaderTest): class DefaultReaderTest(ReaderTest):
@ -153,17 +157,17 @@ class RstReaderTest(ReaderTest):
'(?P<date>\d{4}-\d{2}-\d{2})' '(?P<date>\d{4}-\d{2}-\d{2})'
'_(?P<Slug>.*)' '_(?P<Slug>.*)'
'#(?P<MyMeta>.*)-(?P<author>.*)' '#(?P<MyMeta>.*)-(?P<author>.*)'
), ),
EXTRA_PATH_METADATA={ EXTRA_PATH_METADATA={
input_with_metadata: { input_with_metadata: {
'key-1a': 'value-1a', 'key-1a': 'value-1a',
'key-1b': 'value-1b' 'key-1b': 'value-1b'
}
} }
) }
)
expected_metadata = { expected_metadata = {
'category': 'yeah', 'category': 'yeah',
'author' : 'Alexis Métaireau', 'author': 'Alexis Métaireau',
'title': 'Rst with filename metadata', 'title': 'Rst with filename metadata',
'date': SafeDatetime(2012, 11, 29), 'date': SafeDatetime(2012, 11, 29),
'slug': 'rst_w_filename_meta', 'slug': 'rst_w_filename_meta',
@ -179,38 +183,41 @@ class RstReaderTest(ReaderTest):
path=input_file_path_without_metadata, path=input_file_path_without_metadata,
EXTRA_PATH_METADATA={ EXTRA_PATH_METADATA={
input_file_path_without_metadata: { input_file_path_without_metadata: {
'author': 'Charlès Overwrite'} 'author': 'Charlès Overwrite'
} }
) }
)
expected_without_metadata = { expected_without_metadata = {
'category' : 'misc', 'category': 'misc',
'author' : 'Charlès Overwrite', 'author': 'Charlès Overwrite',
'title' : 'Article title', 'title': 'Article title',
'reader' : 'rst', 'reader': 'rst',
} }
self.assertDictHasSubset( self.assertDictHasSubset(
page_without_metadata.metadata, page_without_metadata.metadata,
expected_without_metadata) expected_without_metadata)
def test_article_extra_path_metadata_dont_overwrite(self): def test_article_extra_path_metadata_dont_overwrite(self):
#EXTRA_PATH_METADATA['author'] should get ignored # EXTRA_PATH_METADATA['author'] should get ignored
#since we don't overwrite already set values # since we don't overwrite already set values
input_file_path = '2012-11-29_rst_w_filename_meta#foo-bar.rst' input_file_path = '2012-11-29_rst_w_filename_meta#foo-bar.rst'
page = self.read_file( page = self.read_file(
path=input_file_path, path=input_file_path,
FILENAME_METADATA=( FILENAME_METADATA=(
'(?P<date>\d{4}-\d{2}-\d{2})' '(?P<date>\d{4}-\d{2}-\d{2})'
'_(?P<Slug>.*)' '_(?P<Slug>.*)'
'#(?P<MyMeta>.*)-(?P<orginalauthor>.*)'), '#(?P<MyMeta>.*)-(?P<orginalauthor>.*)'
),
EXTRA_PATH_METADATA={ EXTRA_PATH_METADATA={
input_file_path: { input_file_path: {
'author': 'Charlès Overwrite', 'author': 'Charlès Overwrite',
'key-1b': 'value-1b'} 'key-1b': 'value-1b'
} }
) }
)
expected = { expected = {
'category': 'yeah', 'category': 'yeah',
'author' : 'Alexis Métaireau', 'author': 'Alexis Métaireau',
'title': 'Rst with filename metadata', 'title': 'Rst with filename metadata',
'date': SafeDatetime(2012, 11, 29), 'date': SafeDatetime(2012, 11, 29),
'slug': 'rst_w_filename_meta', 'slug': 'rst_w_filename_meta',
@ -273,7 +280,7 @@ class RstReaderTest(ReaderTest):
# typogrify should be able to ignore user specified tags, # typogrify should be able to ignore user specified tags,
# but tries to be clever with widont extension # but tries to be clever with widont extension
page = self.read_file(path='article.rst', TYPOGRIFY=True, page = self.read_file(path='article.rst', TYPOGRIFY=True,
TYPOGRIFY_IGNORE_TAGS = ['p']) TYPOGRIFY_IGNORE_TAGS=['p'])
expected = ('<p>THIS is some content. With some stuff to&nbsp;' expected = ('<p>THIS is some content. With some stuff to&nbsp;'
'&quot;typogrify&quot;...</p>\n<p>Now with added ' '&quot;typogrify&quot;...</p>\n<p>Now with added '
'support for <abbr title="three letter acronym">' 'support for <abbr title="three letter acronym">'
@ -284,7 +291,7 @@ class RstReaderTest(ReaderTest):
# typogrify should ignore code blocks by default because # typogrify should ignore code blocks by default because
# code blocks are composed inside the pre tag # code blocks are composed inside the pre tag
page = self.read_file(path='article_with_code_block.rst', page = self.read_file(path='article_with_code_block.rst',
TYPOGRIFY=True) TYPOGRIFY=True)
expected = ('<p>An article with some&nbsp;code</p>\n' expected = ('<p>An article with some&nbsp;code</p>\n'
'<div class="highlight"><pre><span class="n">x</span>' '<div class="highlight"><pre><span class="n">x</span>'
@ -292,13 +299,17 @@ class RstReaderTest(ReaderTest):
' <span class="n">y</span>\n</pre></div>\n' ' <span class="n">y</span>\n</pre></div>\n'
'<p>A block&nbsp;quote:</p>\n<blockquote>\nx ' '<p>A block&nbsp;quote:</p>\n<blockquote>\nx '
'<span class="amp">&amp;</span> y</blockquote>\n' '<span class="amp">&amp;</span> y</blockquote>\n'
'<p>Normal:\nx <span class="amp">&amp;</span>&nbsp;y</p>\n') '<p>Normal:\nx'
' <span class="amp">&amp;</span>'
'&nbsp;y'
'</p>\n')
self.assertEqual(page.content, expected) self.assertEqual(page.content, expected)
# instruct typogrify to also ignore blockquotes # instruct typogrify to also ignore blockquotes
page = self.read_file(path='article_with_code_block.rst', page = self.read_file(path='article_with_code_block.rst',
TYPOGRIFY=True, TYPOGRIFY_IGNORE_TAGS = ['blockquote']) TYPOGRIFY=True,
TYPOGRIFY_IGNORE_TAGS=['blockquote'])
expected = ('<p>An article with some&nbsp;code</p>\n' expected = ('<p>An article with some&nbsp;code</p>\n'
'<div class="highlight"><pre><span class="n">x</span>' '<div class="highlight"><pre><span class="n">x</span>'
@ -306,7 +317,10 @@ class RstReaderTest(ReaderTest):
' <span class="n">y</span>\n</pre></div>\n' ' <span class="n">y</span>\n</pre></div>\n'
'<p>A block&nbsp;quote:</p>\n<blockquote>\nx ' '<p>A block&nbsp;quote:</p>\n<blockquote>\nx '
'&amp; y</blockquote>\n' '&amp; y</blockquote>\n'
'<p>Normal:\nx <span class="amp">&amp;</span>&nbsp;y</p>\n') '<p>Normal:\nx'
' <span class="amp">&amp;</span>'
'&nbsp;y'
'</p>\n')
self.assertEqual(page.content, expected) self.assertEqual(page.content, expected)
except ImportError: except ImportError:
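Both expectations above turn on typogrify's ignore list: tags named in TYPOGRIFY_IGNORE_TAGS pass through the filters untouched. A minimal sketch of the underlying call, assuming the typogrify package's filters.typogrify(text, ignore_tags=None) entry point (the wiring from the Pelican setting down to this call is not shown in the hunk):

    from typogrify.filters import typogrify

    html = '<p>x &amp; y</p>\n<blockquote>x &amp; y</blockquote>'
    # Default run: ampersands in both tags get the <span class="amp"> wrapper.
    print(typogrify(html))
    # With blockquote ignored, its '&amp;' is left as plain text.
    print(typogrify(html, ignore_tags=['blockquote']))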
@ -339,6 +353,7 @@ class RstReaderTest(ReaderTest):
self.assertDictHasSubset(page.metadata, expected) self.assertDictHasSubset(page.metadata, expected)
@unittest.skipUnless(readers.Markdown, "markdown isn't installed") @unittest.skipUnless(readers.Markdown, "markdown isn't installed")
class MdReaderTest(ReaderTest): class MdReaderTest(ReaderTest):
@ -400,7 +415,8 @@ class MdReaderTest(ReaderTest):
'modified': SafeDatetime(2012, 11, 1), 'modified': SafeDatetime(2012, 11, 1),
'multiline': [ 'multiline': [
'Line Metadata should be handle properly.', 'Line Metadata should be handle properly.',
'See syntax of Meta-Data extension of Python Markdown package:', 'See syntax of Meta-Data extension of '
'Python Markdown package:',
'If a line is indented by 4 or more spaces,', 'If a line is indented by 4 or more spaces,',
'that line is assumed to be an additional line of the value', 'that line is assumed to be an additional line of the value',
'for the previous keyword.', 'for the previous keyword.',
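The multiline expectation quotes the continuation rule of Python-Markdown's Meta-Data extension: a line indented by four or more spaces extends the value of the previous keyword. A small sketch, assuming the markdown package with the meta extension enabled:

    import markdown

    source = ('Title: Demo\n'
              'Multiline: first line\n'
              '    second line of the same value\n'
              '\n'
              'Body text.\n')
    md = markdown.Markdown(extensions=['markdown.extensions.meta'])
    md.convert(source)
    # Meta maps lowercased keys to lists of value lines.
    print(md.Meta['multiline'])  # ['first line', 'second line of the same value']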
View file
@ -1,5 +1,8 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function from __future__ import print_function, unicode_literals
from pelican.tests.support import unittest
try: try:
from unittest.mock import Mock from unittest.mock import Mock
except ImportError: except ImportError:
@ -7,7 +10,7 @@ except ImportError:
from mock import Mock from mock import Mock
except ImportError: except ImportError:
Mock = False Mock = False
from pelican.tests.support import unittest
@unittest.skipUnless(Mock, 'Needs Mock module') @unittest.skipUnless(Mock, 'Needs Mock module')
class Test_abbr_role(unittest.TestCase): class Test_abbr_role(unittest.TestCase):
View file
@ -1,13 +1,15 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function from __future__ import print_function, unicode_literals
import copy
import os
import locale
from sys import platform
from os.path import dirname, abspath, join
from pelican.settings import (read_settings, configure_settings, import copy
DEFAULT_CONFIG, DEFAULT_THEME) import locale
import os
from os.path import abspath, dirname, join
from sys import platform
from pelican.settings import (DEFAULT_CONFIG, DEFAULT_THEME,
configure_settings, read_settings)
from pelican.tests.support import unittest from pelican.tests.support import unittest
@ -28,12 +30,14 @@ class TestSettingsConfiguration(unittest.TestCase):
def test_overwrite_existing_settings(self): def test_overwrite_existing_settings(self):
self.assertEqual(self.settings.get('SITENAME'), "Alexis' log") self.assertEqual(self.settings.get('SITENAME'), "Alexis' log")
self.assertEqual(self.settings.get('SITEURL'), self.assertEqual(
'http://blog.notmyidea.org') self.settings.get('SITEURL'),
'http://blog.notmyidea.org')
def test_keep_default_settings(self): def test_keep_default_settings(self):
# Keep default settings if not defined. # Keep default settings if not defined.
self.assertEqual(self.settings.get('DEFAULT_CATEGORY'), self.assertEqual(
self.settings.get('DEFAULT_CATEGORY'),
DEFAULT_CONFIG['DEFAULT_CATEGORY']) DEFAULT_CONFIG['DEFAULT_CATEGORY'])
def test_dont_copy_small_keys(self): def test_dont_copy_small_keys(self):
@ -69,28 +73,31 @@ class TestSettingsConfiguration(unittest.TestCase):
def test_static_path_settings_safety(self): def test_static_path_settings_safety(self):
# Disallow static paths from being strings # Disallow static paths from being strings
settings = {'STATIC_PATHS': 'foo/bar', settings = {
'THEME_STATIC_PATHS': 'bar/baz', 'STATIC_PATHS': 'foo/bar',
# These 4 settings are required to run configure_settings 'THEME_STATIC_PATHS': 'bar/baz',
'PATH': '.', # These 4 settings are required to run configure_settings
'THEME': DEFAULT_THEME, 'PATH': '.',
'SITEURL': 'http://blog.notmyidea.org/', 'THEME': DEFAULT_THEME,
'LOCALE': '', 'SITEURL': 'http://blog.notmyidea.org/',
} 'LOCALE': '',
}
configure_settings(settings) configure_settings(settings)
self.assertEqual(settings['STATIC_PATHS'], self.assertEqual(
DEFAULT_CONFIG['STATIC_PATHS']) settings['STATIC_PATHS'],
self.assertEqual(settings['THEME_STATIC_PATHS'], DEFAULT_CONFIG['STATIC_PATHS'])
DEFAULT_CONFIG['THEME_STATIC_PATHS']) self.assertEqual(
settings['THEME_STATIC_PATHS'],
DEFAULT_CONFIG['THEME_STATIC_PATHS'])
def test_configure_settings(self): def test_configure_settings(self):
# Manipulations to settings should be applied correctly. # Manipulations to settings should be applied correctly.
settings = { settings = {
'SITEURL': 'http://blog.notmyidea.org/', 'SITEURL': 'http://blog.notmyidea.org/',
'LOCALE': '', 'LOCALE': '',
'PATH': os.curdir, 'PATH': os.curdir,
'THEME': DEFAULT_THEME, 'THEME': DEFAULT_THEME,
} }
configure_settings(settings) configure_settings(settings)
# SITEURL should not have a trailing slash # SITEURL should not have a trailing slash
@ -154,7 +161,7 @@ class TestSettingsConfiguration(unittest.TestCase):
settings['PATH'] = '' settings['PATH'] = ''
self.assertRaises(Exception, configure_settings, settings) self.assertRaises(Exception, configure_settings, settings)
# Test nonexistent THEME # Test nonexistent THEME
settings['PATH'] = os.curdir settings['PATH'] = os.curdir
settings['THEME'] = 'foo' settings['THEME'] = 'foo'

View file
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals from __future__ import unicode_literals
from pelican.urlwrappers import URLWrapper, Tag, Category
from pelican.tests.support import unittest from pelican.tests.support import unittest
from pelican.urlwrappers import Category, Tag, URLWrapper
class TestURLWrapper(unittest.TestCase): class TestURLWrapper(unittest.TestCase):
def test_ordering(self): def test_ordering(self):

View file
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, absolute_import from __future__ import absolute_import, print_function, unicode_literals
import logging
import shutil
import os
import time
import locale import locale
import logging
import os
import shutil
import time
from sys import platform from sys import platform
from tempfile import mkdtemp from tempfile import mkdtemp
import pytz import pytz
from pelican.generators import TemplatePagesGenerator
from pelican.writers import Writer
from pelican.settings import read_settings
from pelican import utils from pelican import utils
from pelican.tests.support import get_article, LoggedTestCase, locale_available, unittest from pelican.generators import TemplatePagesGenerator
from pelican.settings import read_settings
from pelican.tests.support import (LoggedTestCase, get_article,
locale_available, unittest)
from pelican.writers import Writer
class TestUtils(LoggedTestCase): class TestUtils(LoggedTestCase):
@ -72,7 +74,7 @@ class TestUtils(LoggedTestCase):
'2012-11-22T22:11:10Z': date_hour_sec_z, '2012-11-22T22:11:10Z': date_hour_sec_z,
'2012-11-22T22:11:10-0500': date_hour_sec_est, '2012-11-22T22:11:10-0500': date_hour_sec_est,
'2012-11-22T22:11:10.123Z': date_hour_sec_frac_z, '2012-11-22T22:11:10.123Z': date_hour_sec_frac_z,
} }
# examples from http://www.w3.org/TR/NOTE-datetime # examples from http://www.w3.org/TR/NOTE-datetime
iso_8601_date = utils.SafeDatetime(year=1997, month=7, day=16) iso_8601_date = utils.SafeDatetime(year=1997, month=7, day=16)
@ -95,7 +97,6 @@ class TestUtils(LoggedTestCase):
# invalid ones # invalid ones
invalid_dates = ['2010-110-12', 'yay'] invalid_dates = ['2010-110-12', 'yay']
for value, expected in dates.items(): for value, expected in dates.items():
self.assertEqual(utils.get_date(value), expected, value) self.assertEqual(utils.get_date(value), expected, value)
@ -290,7 +291,9 @@ class TestUtils(LoggedTestCase):
self.assertEqual(utils.strftime(d, '%d/%m/%Y'), '29/08/2012') self.assertEqual(utils.strftime(d, '%d/%m/%Y'), '29/08/2012')
# RFC 3339 # RFC 3339
self.assertEqual(utils.strftime(d, '%Y-%m-%dT%H:%M:%SZ'),'2012-08-29T00:00:00Z') self.assertEqual(
utils.strftime(d, '%Y-%m-%dT%H:%M:%SZ'),
'2012-08-29T00:00:00Z')
# % escaped # % escaped
self.assertEqual(utils.strftime(d, '%d%%%m%%%y'), '29%08%12') self.assertEqual(utils.strftime(d, '%d%%%m%%%y'), '29%08%12')
@ -306,8 +309,9 @@ class TestUtils(LoggedTestCase):
'Published in 29-08-2012') 'Published in 29-08-2012')
# with non-ascii text # with non-ascii text
self.assertEqual(utils.strftime(d, '%d/%m/%Y Øl trinken beim Besäufnis'), self.assertEqual(
'29/08/2012 Øl trinken beim Besäufnis') utils.strftime(d, '%d/%m/%Y Øl trinken beim Besäufnis'),
'29/08/2012 Øl trinken beim Besäufnis')
# alternative formatting options # alternative formatting options
self.assertEqual(utils.strftime(d, '%-d/%-m/%y'), '29/8/12') self.assertEqual(utils.strftime(d, '%-d/%-m/%y'), '29/8/12')
@ -316,7 +320,6 @@ class TestUtils(LoggedTestCase):
d = utils.SafeDatetime(2012, 8, 9) d = utils.SafeDatetime(2012, 8, 9)
self.assertEqual(utils.strftime(d, '%-d/%-m/%y'), '9/8/12') self.assertEqual(utils.strftime(d, '%-d/%-m/%y'), '9/8/12')
# test the output of utils.strftime in a different locale # test the output of utils.strftime in a different locale
# Turkish locale # Turkish locale
@unittest.skipUnless(locale_available('tr_TR.UTF-8') or @unittest.skipUnless(locale_available('tr_TR.UTF-8') or
@ -339,17 +342,18 @@ class TestUtils(LoggedTestCase):
'Çarşamba, 29 Ağustos 2012') 'Çarşamba, 29 Ağustos 2012')
# with text # with text
self.assertEqual(utils.strftime(d, 'Yayınlanma tarihi: %A, %d %B %Y'), self.assertEqual(
utils.strftime(d, 'Yayınlanma tarihi: %A, %d %B %Y'),
'Yayınlanma tarihi: Çarşamba, 29 Ağustos 2012') 'Yayınlanma tarihi: Çarşamba, 29 Ağustos 2012')
# non-ascii format candidate (someone might pass it... for some reason) # non-ascii format candidate (someone might pass it... for some reason)
self.assertEqual(utils.strftime(d, '%Y yılında %üretim artışı'), self.assertEqual(
utils.strftime(d, '%Y yılında %üretim artışı'),
'2012 yılında %üretim artışı') '2012 yılında %üretim artışı')
# restore locale back # restore locale back
locale.setlocale(locale.LC_ALL, old_locale) locale.setlocale(locale.LC_ALL, old_locale)
# test the output of utils.strftime in a different locale # test the output of utils.strftime in a different locale
# French locale # French locale
@unittest.skipUnless(locale_available('fr_FR.UTF-8') or @unittest.skipUnless(locale_available('fr_FR.UTF-8') or
@ -373,21 +377,28 @@ class TestUtils(LoggedTestCase):
self.assertTrue(utils.strftime(d, '%A') in ('mercredi', 'Mercredi')) self.assertTrue(utils.strftime(d, '%A') in ('mercredi', 'Mercredi'))
# with text # with text
self.assertEqual(utils.strftime(d, 'Écrit le %d %B %Y'), self.assertEqual(
utils.strftime(d, 'Écrit le %d %B %Y'),
'Écrit le 29 août 2012') 'Écrit le 29 août 2012')
# non-ascii format candidate (someone might pass it... for some reason) # non-ascii format candidate (someone might pass it... for some reason)
self.assertEqual(utils.strftime(d, '%écrits en %Y'), self.assertEqual(
utils.strftime(d, '%écrits en %Y'),
'%écrits en 2012') '%écrits en 2012')
# restore locale back # restore locale back
locale.setlocale(locale.LC_ALL, old_locale) locale.setlocale(locale.LC_ALL, old_locale)
def test_maybe_pluralize(self): def test_maybe_pluralize(self):
self.assertEqual(utils.maybe_pluralize(0, 'Article', 'Articles'), '0 Articles') self.assertEqual(
self.assertEqual(utils.maybe_pluralize(1, 'Article', 'Articles'), '1 Article') utils.maybe_pluralize(0, 'Article', 'Articles'),
self.assertEqual(utils.maybe_pluralize(2, 'Article', 'Articles'), '2 Articles') '0 Articles')
self.assertEqual(
utils.maybe_pluralize(1, 'Article', 'Articles'),
'1 Article')
self.assertEqual(
utils.maybe_pluralize(2, 'Article', 'Articles'),
'2 Articles')
class TestCopy(unittest.TestCase): class TestCopy(unittest.TestCase):
@ -435,8 +446,9 @@ class TestCopy(unittest.TestCase):
def test_copy_file_create_dirs(self): def test_copy_file_create_dirs(self):
self._create_file('a.txt') self._create_file('a.txt')
utils.copy(os.path.join(self.root_dir, 'a.txt'), utils.copy(
os.path.join(self.root_dir, 'b0', 'b1', 'b2', 'b3', 'b.txt')) os.path.join(self.root_dir, 'a.txt'),
os.path.join(self.root_dir, 'b0', 'b1', 'b2', 'b3', 'b.txt'))
self._exist_dir('b0') self._exist_dir('b0')
self._exist_dir('b0', 'b1') self._exist_dir('b0', 'b1')
self._exist_dir('b0', 'b1', 'b2') self._exist_dir('b0', 'b1', 'b2')
@ -491,35 +503,39 @@ class TestDateFormatter(unittest.TestCase):
template_file.write('date = {{ date|strftime("%A, %d %B %Y") }}') template_file.write('date = {{ date|strftime("%A, %d %B %Y") }}')
self.date = utils.SafeDatetime(2012, 8, 29) self.date = utils.SafeDatetime(2012, 8, 29)
def tearDown(self): def tearDown(self):
shutil.rmtree(self.temp_content) shutil.rmtree(self.temp_content)
shutil.rmtree(self.temp_output) shutil.rmtree(self.temp_output)
# reset locale to default # reset locale to default
locale.setlocale(locale.LC_ALL, '') locale.setlocale(locale.LC_ALL, '')
@unittest.skipUnless(locale_available('fr_FR.UTF-8') or @unittest.skipUnless(locale_available('fr_FR.UTF-8') or
locale_available('French'), locale_available('French'),
'French locale needed') 'French locale needed')
def test_french_strftime(self): def test_french_strftime(self):
# This test tries to reproduce an issue that occurred with python3.3 under macos10 only # This test tries to reproduce an issue that
# occurred with python3.3 under macos10 only
if platform == 'win32': if platform == 'win32':
locale.setlocale(locale.LC_ALL, str('French')) locale.setlocale(locale.LC_ALL, str('French'))
else: else:
locale.setlocale(locale.LC_ALL, str('fr_FR.UTF-8')) locale.setlocale(locale.LC_ALL, str('fr_FR.UTF-8'))
date = utils.SafeDatetime(2014,8,14) date = utils.SafeDatetime(2014, 8, 14)
# we compare the lower() dates since macos10 returns "Jeudi" for %A whereas linux reports "jeudi" # we compare the lower() dates since macos10 returns
self.assertEqual( u'jeudi, 14 août 2014', utils.strftime(date, date_format="%A, %d %B %Y").lower() ) # "Jeudi" for %A whereas linux reports "jeudi"
self.assertEqual(
u'jeudi, 14 août 2014',
utils.strftime(date, date_format="%A, %d %B %Y").lower())
df = utils.DateFormatter() df = utils.DateFormatter()
self.assertEqual( u'jeudi, 14 août 2014', df(date, date_format="%A, %d %B %Y").lower() ) self.assertEqual(
u'jeudi, 14 août 2014',
df(date, date_format="%A, %d %B %Y").lower())
# Let us now set the global locale to C: # Let us now set the global locale to C:
locale.setlocale(locale.LC_ALL, str('C')) locale.setlocale(locale.LC_ALL, str('C'))
# DateFormatter should still work as expected since it is the whole point of DateFormatter # DateFormatter should still work as expected
# since it is the whole point of DateFormatter
# (This is where pre-2014/4/15 code fails on macos10) # (This is where pre-2014/4/15 code fails on macos10)
df_date = df(date, date_format="%A, %d %B %Y").lower() df_date = df(date, date_format="%A, %d %B %Y").lower()
self.assertEqual( u'jeudi, 14 août 2014', df_date ) self.assertEqual(u'jeudi, 14 août 2014', df_date)
@unittest.skipUnless(locale_available('fr_FR.UTF-8') or @unittest.skipUnless(locale_available('fr_FR.UTF-8') or
locale_available('French'), locale_available('French'),
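The rewrapped comments state the invariant under test: a DateFormatter built while fr_FR is active must keep producing French dates after the global locale moves to C. The usual way to get that is to capture the locale at construction and swap it in around each call; a rough, hypothetical sketch of that save-and-restore idea (Pelican's real implementation differs in detail):

    import locale
    from contextlib import contextmanager

    @contextmanager
    def temporary_locale(temp_locale):
        # Remember the current LC_ALL, switch, and always switch back.
        old = locale.setlocale(locale.LC_ALL)
        locale.setlocale(locale.LC_ALL, temp_locale)
        try:
            yield
        finally:
            locale.setlocale(locale.LC_ALL, old)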
@ -530,9 +546,12 @@ class TestDateFormatter(unittest.TestCase):
else: else:
locale_string = 'fr_FR.UTF-8' locale_string = 'fr_FR.UTF-8'
settings = read_settings( settings = read_settings(
override = {'LOCALE': locale_string, override={
'TEMPLATE_PAGES': {'template/source.html': 'LOCALE': locale_string,
'generated/file.html'}}) 'TEMPLATE_PAGES': {
'template/source.html': 'generated/file.html'
}
})
generator = TemplatePagesGenerator( generator = TemplatePagesGenerator(
{'date': self.date}, settings, {'date': self.date}, settings,
@ -543,7 +562,7 @@ class TestDateFormatter(unittest.TestCase):
generator.generate_output(writer) generator.generate_output(writer)
output_path = os.path.join( output_path = os.path.join(
self.temp_output, 'generated', 'file.html') self.temp_output, 'generated', 'file.html')
# output file has been generated # output file has been generated
self.assertTrue(os.path.exists(output_path)) self.assertTrue(os.path.exists(output_path))
@ -553,7 +572,6 @@ class TestDateFormatter(unittest.TestCase):
self.assertEqual(output_file, self.assertEqual(output_file,
utils.strftime(self.date, 'date = %A, %d %B %Y')) utils.strftime(self.date, 'date = %A, %d %B %Y'))
@unittest.skipUnless(locale_available('tr_TR.UTF-8') or @unittest.skipUnless(locale_available('tr_TR.UTF-8') or
locale_available('Turkish'), locale_available('Turkish'),
'Turkish locale needed') 'Turkish locale needed')
@ -563,9 +581,12 @@ class TestDateFormatter(unittest.TestCase):
else: else:
locale_string = 'tr_TR.UTF-8' locale_string = 'tr_TR.UTF-8'
settings = read_settings( settings = read_settings(
override = {'LOCALE': locale_string, override={
'TEMPLATE_PAGES': {'template/source.html': 'LOCALE': locale_string,
'generated/file.html'}}) 'TEMPLATE_PAGES': {
'template/source.html': 'generated/file.html'
}
})
generator = TemplatePagesGenerator( generator = TemplatePagesGenerator(
{'date': self.date}, settings, {'date': self.date}, settings,
@ -576,7 +597,7 @@ class TestDateFormatter(unittest.TestCase):
generator.generate_output(writer) generator.generate_output(writer)
output_path = os.path.join( output_path = os.path.join(
self.temp_output, 'generated', 'file.html') self.temp_output, 'generated', 'file.html')
# output file has been generated # output file has been generated
self.assertTrue(os.path.exists(output_path)) self.assertTrue(os.path.exists(output_path))
View file
@ -1,29 +1,30 @@
#!/usr/bin/env python #!/usr/bin/env python
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function from __future__ import print_function, unicode_literals
import argparse import argparse
try: import logging
from html import unescape # py3.4+
except ImportError:
from six.moves.html_parser import HTMLParser
unescape = HTMLParser().unescape
import os import os
import re import re
import subprocess import subprocess
import sys import sys
import time import time
import logging
from codecs import open from codecs import open
from six.moves.urllib.error import URLError from six.moves.urllib.error import URLError
from six.moves.urllib.parse import urlparse from six.moves.urllib.parse import urlparse
from six.moves.urllib.request import urlretrieve from six.moves.urllib.request import urlretrieve
# pelican.log has to be the first pelican module to be loaded
# because logging.setLoggerClass has to be called before logging.getLogger # because logging.setLoggerClass has to be called before logging.getLogger
from pelican.log import init from pelican.log import init
from pelican.utils import slugify, SafeDatetime from pelican.utils import SafeDatetime, slugify
try:
from html import unescape # py3.4+
except ImportError:
from six.moves.html_parser import HTMLParser
unescape = HTMLParser().unescape
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
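The regrouped import block keeps the unescape fallback intact: html.unescape on py3.4+, HTMLParser().unescape on older interpreters; either way, a callable that maps entities back to text:

    try:
        from html import unescape  # py3.4+
    except ImportError:
        from six.moves.html_parser import HTMLParser
        unescape = HTMLParser().unescape

    assert unescape('x &amp; y') == 'x & y'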
@ -70,12 +71,19 @@ def decode_wp_content(content, br=True):
content = "" content = ""
for p in pgraphs: for p in pgraphs:
content = content + "<p>" + p.strip() + "</p>\n" content = content + "<p>" + p.strip() + "</p>\n"
# under certain strange conditions it could create a P of entirely whitespace # under certain strange conditions it could create
# a P of entirely whitespace
content = re.sub(r'<p>\s*</p>', '', content) content = re.sub(r'<p>\s*</p>', '', content)
content = re.sub(r'<p>([^<]+)</(div|address|form)>', "<p>\\1</p></\\2>", content) content = re.sub(
r'<p>([^<]+)</(div|address|form)>',
"<p>\\1</p></\\2>",
content)
# don't wrap tags # don't wrap tags
content = re.sub(r'<p>\s*(</?' + allblocks + r'[^>]*>)\s*</p>', "\\1", content) content = re.sub(
#problem with nested lists r'<p>\s*(</?' + allblocks + r'[^>]*>)\s*</p>',
"\\1",
content)
# problem with nested lists
content = re.sub(r'<p>(<li.*)</p>', "\\1", content) content = re.sub(r'<p>(<li.*)</p>', "\\1", content)
content = re.sub(r'<p><blockquote([^>]*)>', "<blockquote\\1><p>", content) content = re.sub(r'<p><blockquote([^>]*)>', "<blockquote\\1><p>", content)
content = content.replace('</blockquote></p>', '</p></blockquote>') content = content.replace('</blockquote></p>', '</p></blockquote>')
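decode_wp_content follows WordPress's wpautop scheme: wrap blank-line-separated chunks in <p>, then repair the fallout with targeted re.sub passes such as the two reflowed above. A toy run of exactly those passes:

    import re

    content = '<p>   </p>\n<p>text</div>'
    content = re.sub(r'<p>\s*</p>', '', content)  # drop whitespace-only paragraphs
    content = re.sub(r'<p>([^<]+)</(div|address|form)>',
                     '<p>\\1</p></\\2>', content)  # close <p> before a block close
    print(content)  # '\n<p>text</p></div>'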
@ -84,12 +92,20 @@ def decode_wp_content(content, br=True):
if br: if br:
def _preserve_newline(match): def _preserve_newline(match):
return match.group(0).replace("\n", "<WPPreserveNewline />") return match.group(0).replace("\n", "<WPPreserveNewline />")
content = re.sub(r'/<(script|style).*?<\/\\1>/s', _preserve_newline, content) content = re.sub(
r'/<(script|style).*?<\/\\1>/s',
_preserve_newline,
content)
# optionally make line breaks # optionally make line breaks
content = re.sub(r'(?<!<br />)\s*\n', "<br />\n", content) content = re.sub(r'(?<!<br />)\s*\n', "<br />\n", content)
content = content.replace("<WPPreserveNewline />", "\n") content = content.replace("<WPPreserveNewline />", "\n")
content = re.sub(r'(</?' + allblocks + r'[^>]*>)\s*<br />', "\\1", content) content = re.sub(
content = re.sub(r'<br />(\s*</?(?:p|li|div|dl|dd|dt|th|pre|td|ul|ol)[^>]*>)', '\\1', content) r'(</?' + allblocks + r'[^>]*>)\s*<br />', "\\1",
content)
content = re.sub(
r'<br />(\s*</?(?:p|li|div|dl|dd|dt|th|pre|td|ul|ol)[^>]*>)',
'\\1',
content)
content = re.sub(r'\n</p>', "</p>", content) content = re.sub(r'\n</p>', "</p>", content)
if pre_tags: if pre_tags:
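One caveat worth flagging: the _preserve_newline pattern r'/<(script|style).*?<\/\\1>/s' looks ported verbatim from PHP's preg syntax, delimiters and /s modifier included, and the doubled backslash keeps \1 from acting as a backreference, so in Python it most likely never matches and newlines inside script/style blocks are not actually protected. The reflow above preserves that behaviour unchanged; a Python-native expression of the stated intent would look more like this (an illustrative sketch, not the committed code):

    import re

    PRESERVE = re.compile(r'<(script|style)[^>]*>.*?</\1>', re.DOTALL)

    def _preserve_newline(match):
        return match.group(0).replace('\n', '<WPPreserveNewline />')

    html = '<script>\nvar x = 1;\n</script>\nbody text'
    print(PRESERVE.sub(_preserve_newline, html))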
@ -100,13 +116,14 @@ def decode_wp_content(content, br=True):
return content return content
def get_items(xml): def get_items(xml):
"""Opens a WordPress xml file and returns a list of items""" """Opens a WordPress xml file and returns a list of items"""
try: try:
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
except ImportError: except ImportError:
error = ('Missing dependency ' error = ('Missing dependency "BeautifulSoup4" and "lxml" required to '
'"BeautifulSoup4" and "lxml" required to import WordPress XML files.') 'import WordPress XML files.')
sys.exit(error) sys.exit(error)
with open(xml, encoding='utf-8') as infile: with open(xml, encoding='utf-8') as infile:
xmlfile = infile.read() xmlfile = infile.read()
@ -114,12 +131,14 @@ def get_items(xml):
items = soup.rss.channel.findAll('item') items = soup.rss.channel.findAll('item')
return items return items
def get_filename(filename, post_id): def get_filename(filename, post_id):
if filename is not None: if filename is not None:
return filename return filename
else: else:
return post_id return post_id
def wp2fields(xml, wp_custpost=False): def wp2fields(xml, wp_custpost=False):
"""Opens a wordpress XML file, and yield Pelican fields""" """Opens a wordpress XML file, and yield Pelican fields"""
@ -141,16 +160,18 @@ def wp2fields(xml, wp_custpost=False):
content = item.find('encoded').string content = item.find('encoded').string
raw_date = item.find('post_date').string raw_date = item.find('post_date').string
date_object = time.strptime(raw_date, "%Y-%m-%d %H:%M:%S") date_object = time.strptime(raw_date, '%Y-%m-%d %H:%M:%S')
date = time.strftime("%Y-%m-%d %H:%M", date_object) date = time.strftime('%Y-%m-%d %H:%M', date_object)
author = item.find('creator').string author = item.find('creator').string
categories = [cat.string for cat in item.findAll('category', {'domain' : 'category'})] categories = [cat.string for cat
# caturl = [cat['nicename'] for cat in item.find(domain='category')] in item.findAll('category', {'domain': 'category'})]
tags = [tag.string for tag in item.findAll('category', {'domain' : 'post_tag'})] tags = [tag.string for tag
in item.findAll('category', {'domain': 'post_tag'})]
# To publish a post the status should be 'published' # To publish a post the status should be 'published'
status = 'published' if item.find('status').string == "publish" else item.find('status').string status = 'published' if item.find('status').string == "publish" \
else item.find('status').string
kind = 'article' kind = 'article'
post_type = item.find('post_type').string post_type = item.find('post_type').string
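The quoting cleanup around the dates leaves the logic as before: parse WordPress's full post_date, then re-emit it without the seconds. Concretely:

    import time

    raw_date = '2015-06-16 09:25:09'
    date_object = time.strptime(raw_date, '%Y-%m-%d %H:%M:%S')
    date = time.strftime('%Y-%m-%d %H:%M', date_object)
    assert date == '2015-06-16 09:25'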
@ -159,16 +180,17 @@ def wp2fields(xml, wp_custpost=False):
elif wp_custpost: elif wp_custpost:
if post_type == 'post': if post_type == 'post':
pass pass
# Old behaviour was to name everything not a page as an article. # Old behaviour was to name everything not a page as an
# Theoretically all attachments have status == inherit so # article. Theoretically all attachments have status == inherit
# no attachments should be here. But this statement is to # so no attachments should be here. But this statement is to
# maintain existing behaviour in case that doesn't hold true. # maintain existing behaviour in case that doesn't hold true.
elif post_type == 'attachment': elif post_type == 'attachment':
pass pass
else: else:
kind = post_type kind = post_type
yield (title, content, filename, date, author, categories, tags, status, yield (title, content, filename, date, author, categories,
kind, "wp-html") tags, status, kind, 'wp-html')
def dc2fields(file): def dc2fields(file):
"""Opens a Dotclear export file, and yield pelican fields""" """Opens a Dotclear export file, and yield pelican fields"""
@ -176,10 +198,10 @@ def dc2fields(file):
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
except ImportError: except ImportError:
error = ('Missing dependency ' error = ('Missing dependency '
'"BeautifulSoup4" and "lxml" required to import Dotclear files.') '"BeautifulSoup4" and "lxml" required '
'to import Dotclear files.')
sys.exit(error) sys.exit(error)
in_cat = False in_cat = False
in_post = False in_post = False
category_list = {} category_list = {}
@ -203,7 +225,7 @@ def dc2fields(file):
# remove 1st and last "" # remove 1st and last ""
fields[0] = fields[0][1:] fields[0] = fields[0][1:]
# fields[-1] = fields[-1][:-1] # fields[-1] = fields[-1][:-1]
category_list[fields[0]]=fields[2] category_list[fields[0]] = fields[2]
elif in_post: elif in_post:
if not line: if not line:
in_post = False in_post = False
@ -249,45 +271,50 @@ def dc2fields(file):
# remove seconds # remove seconds
post_creadt = ':'.join(post_creadt.split(':')[0:2]) post_creadt = ':'.join(post_creadt.split(':')[0:2])
author = "" author = ''
categories = [] categories = []
tags = [] tags = []
if cat_id: if cat_id:
categories = [category_list[id].strip() for id in cat_id.split(',')] categories = [category_list[id].strip() for id
in cat_id.split(',')]
# Get tags related to a post # Get tags related to a post
tag = post_meta.replace('{', '').replace('}', '').replace('a:1:s:3:\\"tag\\";a:', '').replace('a:0:', '') tag = (post_meta.replace('{', '')
.replace('}', '')
.replace('a:1:s:3:\\"tag\\";a:', '')
.replace('a:0:', ''))
if len(tag) > 1: if len(tag) > 1:
if int(tag[:1]) == 1: if int(tag[:1]) == 1:
newtag = tag.split('"')[1] newtag = tag.split('"')[1]
tags.append( tags.append(
BeautifulSoup( BeautifulSoup(
newtag newtag,
, "xml" 'xml'
) )
# bs4 always outputs UTF-8 # bs4 always outputs UTF-8
.decode('utf-8') .decode('utf-8')
) )
else: else:
i=1 i = 1
j=1 j = 1
while(i <= int(tag[:1])): while(i <= int(tag[:1])):
newtag = tag.split('"')[j].replace('\\','') newtag = tag.split('"')[j].replace('\\', '')
tags.append( tags.append(
BeautifulSoup( BeautifulSoup(
newtag newtag,
, "xml" 'xml'
) )
# bs4 always outputs UTF-8 # bs4 always outputs UTF-8
.decode('utf-8') .decode('utf-8')
) )
i=i+1 i = i + 1
if j < int(tag[:1])*2: if j < int(tag[:1]) * 2:
j=j+2 j = j + 2
""" """
dotclear2 does not use markdown by default unless you use the markdown plugin dotclear2 does not use markdown by default unless
you use the markdown plugin
Ref: http://plugins.dotaddict.org/dc2/details/formatting-markdown Ref: http://plugins.dotaddict.org/dc2/details/formatting-markdown
""" """
if post_format == "markdown": if post_format == "markdown":
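The tag block being rewrapped here decodes a PHP-serialized fragment by brute force: once the wrapper is stripped, the first character is the tag count and every second token of a split on '"' is a tag name. A hedged walkthrough with a made-up two-tag value (the real Dotclear post_meta wire format may differ):

    tag = '2:{i:0;s:4:"food";i:1;s:4:"wine";}'  # hypothetical stripped post_meta
    names, j = [], 1
    for _ in range(int(tag[:1])):
        names.append(tag.split('"')[j].replace('\\', ''))
        j += 2
    assert names == ['food', 'wine']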
@ -322,12 +349,13 @@ def posterous2fields(api_token, email, password):
# py2 import # py2 import
import urllib2 as urllib_request import urllib2 as urllib_request
def get_posterous_posts(api_token, email, password, page=1):
def get_posterous_posts(api_token, email, password, page = 1): base64string = base64.encodestring(
base64string = base64.encodestring(("%s:%s" % (email, password)).encode('utf-8')).replace(b'\n', b'') ("%s:%s" % (email, password)).encode('utf-8')).replace('\n', '')
url = "http://posterous.com/api/v2/users/me/sites/primary/posts?api_token=%s&page=%d" % (api_token, page) url = ("http://posterous.com/api/v2/users/me/sites/primary/"
"posts?api_token=%s&page=%d") % (api_token, page)
request = urllib_request.Request(url) request = urllib_request.Request(url)
request.add_header("Authorization", "Basic %s" % base64string.decode()) request.add_header('Authorization', 'Basic %s' % base64string.decode())
handle = urllib_request.urlopen(request) handle = urllib_request.urlopen(request)
posts = json.loads(handle.read().decode('utf-8')) posts = json.loads(handle.read().decode('utf-8'))
return posts return posts
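One side effect of this rewrap is easy to miss: base64.encodestring returns bytes on py3, so the new .replace('\n', '') with str arguments (the old code used b'\n', b'') would raise a TypeError there. A bytes-safe sketch of the same Basic-Auth header, offered as an illustration with placeholder credentials rather than as the committed code:

    import base64

    email, password = 'user@example.com', 'secret'  # placeholders
    token = base64.b64encode(('%s:%s' % (email, password)).encode('utf-8'))
    header = 'Basic %s' % token.decode('ascii')  # b64encode adds no newlines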
@ -344,16 +372,18 @@ def posterous2fields(api_token, email, password):
slug = slugify(post.get('title')) slug = slugify(post.get('title'))
tags = [tag.get('name') for tag in post.get('tags')] tags = [tag.get('name') for tag in post.get('tags')]
raw_date = post.get('display_date') raw_date = post.get('display_date')
date_object = SafeDatetime.strptime(raw_date[:-6], "%Y/%m/%d %H:%M:%S") date_object = SafeDatetime.strptime(
raw_date[:-6], '%Y/%m/%d %H:%M:%S')
offset = int(raw_date[-5:]) offset = int(raw_date[-5:])
delta = timedelta(hours = offset / 100) delta = timedelta(hours=(offset / 100))
date_object -= delta date_object -= delta
date = date_object.strftime("%Y-%m-%d %H:%M") date = date_object.strftime('%Y-%m-%d %H:%M')
kind = 'article' # TODO: Recognise pages kind = 'article' # TODO: Recognise pages
status = 'published' # TODO: Find a way for draft posts status = 'published' # TODO: Find a way for draft posts
yield (post.get('title'), post.get('body_cleaned'), slug, date, yield (post.get('title'), post.get('body_cleaned'),
post.get('user').get('display_name'), [], tags, status, kind, "html") slug, date, post.get('user').get('display_name'),
[], tags, status, kind, 'html')
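The offset arithmetic above recovers whole hours only: '-0530' divided by 100 gives -5.3 under py3 (and truncates under py2 integer division), not 5 hours 30 minutes. A divmod-based sketch that keeps the minutes, as an illustration rather than the committed behaviour:

    from datetime import timedelta

    raw_offset = '-0530'
    sign = -1 if raw_offset.startswith('-') else 1
    hours, minutes = divmod(int(raw_offset[1:]), 100)
    delta = sign * timedelta(hours=hours, minutes=minutes)
    assert delta == timedelta(hours=-5, minutes=-30)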
def tumblr2fields(api_key, blogname): def tumblr2fields(api_key, blogname):
@ -374,7 +404,9 @@ def tumblr2fields(api_key, blogname):
import urllib2 as urllib_request import urllib2 as urllib_request
def get_tumblr_posts(api_key, blogname, offset=0): def get_tumblr_posts(api_key, blogname, offset=0):
url = "http://api.tumblr.com/v2/blog/%s.tumblr.com/posts?api_key=%s&offset=%d&filter=raw" % (blogname, api_key, offset) url = ("http://api.tumblr.com/v2/blog/%s.tumblr.com/"
"posts?api_key=%s&offset=%d&filter=raw") % (
blogname, api_key, offset)
request = urllib_request.Request(url) request = urllib_request.Request(url)
handle = urllib_request.urlopen(request) handle = urllib_request.urlopen(request)
posts = json.loads(handle.read().decode('utf-8')) posts = json.loads(handle.read().decode('utf-8'))
@ -384,7 +416,10 @@ def tumblr2fields(api_key, blogname):
posts = get_tumblr_posts(api_key, blogname, offset) posts = get_tumblr_posts(api_key, blogname, offset)
while len(posts) > 0: while len(posts) > 0:
for post in posts: for post in posts:
title = post.get('title') or post.get('source_title') or post.get('type').capitalize() title = \
post.get('title') or \
post.get('source_title') or \
post.get('type').capitalize()
slug = post.get('slug') or slugify(title) slug = post.get('slug') or slugify(title)
tags = post.get('tags') tags = post.get('tags')
timestamp = post.get('timestamp') timestamp = post.get('timestamp')
@ -398,7 +433,11 @@ def tumblr2fields(api_key, blogname):
fmtstr = '![%s](%s)' fmtstr = '![%s](%s)'
else: else:
fmtstr = '<img alt="%s" src="%s" />' fmtstr = '<img alt="%s" src="%s" />'
content = '\n'.join(fmtstr % (photo.get('caption'), photo.get('original_size').get('url')) for photo in post.get('photos')) content = ''
for photo in post.get('photos'):
content += '\n'.join(
fmtstr % (photo.get('caption'),
photo.get('original_size').get('url')))
content += '\n\n' + post.get('caption') content += '\n\n' + post.get('caption')
elif type == 'quote': elif type == 'quote':
if format == 'markdown': if format == 'markdown':
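Careful with the photo-post rewrap just above: '\n'.join applied to a single formatted string interleaves a newline between every character of that string, whereas the pre-change one-liner joined one formatted line per photo. A runnable sketch of the original per-photo join, wrapped to the same width (sample data is made up):

    fmtstr = '<img alt="%s" src="%s" />'
    photos = [{'caption': 'one', 'original_size': {'url': 'http://x/1.jpg'}},
              {'caption': 'two', 'original_size': {'url': 'http://x/2.jpg'}}]
    content = '\n'.join(fmtstr % (p.get('caption'),
                                  p.get('original_size').get('url'))
                        for p in photos)
    print(content)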
@ -417,16 +456,29 @@ def tumblr2fields(api_key, blogname):
fmtstr = '[via](%s)\n\n' fmtstr = '[via](%s)\n\n'
else: else:
fmtstr = '<p><a href="%s">via</a></p>\n' fmtstr = '<p><a href="%s">via</a></p>\n'
content = fmtstr % post.get('source_url') + post.get('caption') + post.get('player') content = fmtstr % post.get('source_url') + \
post.get('caption') + \
post.get('player')
elif type == 'video': elif type == 'video':
if format == 'markdown': if format == 'markdown':
fmtstr = '[via](%s)\n\n' fmtstr = '[via](%s)\n\n'
else: else:
fmtstr = '<p><a href="%s">via</a></p>\n' fmtstr = '<p><a href="%s">via</a></p>\n'
content = fmtstr % post.get('source_url') + post.get('caption') + '\n'.join(player.get('embed_code') for player in post.get('player')) source = fmtstr % post.get('source_url')
caption = post.get('caption')
players = '\n'.join(player.get('embed_code')
for player in post.get('player'))
content = source + caption + players
elif type == 'answer': elif type == 'answer':
title = post.get('question') title = post.get('question')
content = '<p><a href="%s" rel="external nofollow">%s</a>: %s</p>\n%s' % (post.get('asking_name'), post.get('asking_url'), post.get('question'), post.get('answer')) content = ('<p>'
'<a href="%s" rel="external nofollow">%s</a>'
': %s'
'</p>\n'
' %s' % (post.get('asking_name'),
post.get('asking_url'),
post.get('question'),
post.get('answer')))
content = content.rstrip() + '\n' content = content.rstrip() + '\n'
kind = 'article' kind = 'article'
@ -438,25 +490,30 @@ def tumblr2fields(api_key, blogname):
offset += len(posts) offset += len(posts)
posts = get_tumblr_posts(api_key, blogname, offset) posts = get_tumblr_posts(api_key, blogname, offset)
def feed2fields(file): def feed2fields(file):
"""Read a feed and yield pelican fields""" """Read a feed and yield pelican fields"""
import feedparser import feedparser
d = feedparser.parse(file) d = feedparser.parse(file)
for entry in d.entries: for entry in d.entries:
date = (time.strftime("%Y-%m-%d %H:%M", entry.updated_parsed) date = (time.strftime('%Y-%m-%d %H:%M', entry.updated_parsed)
if hasattr(entry, "updated_parsed") else None) if hasattr(entry, 'updated_parsed') else None)
author = entry.author if hasattr(entry, "author") else None author = entry.author if hasattr(entry, 'author') else None
tags = [e['term'] for e in entry.tags] if hasattr(entry, "tags") else None tags = ([e['term'] for e in entry.tags]
if hasattr(entry, 'tags') else None)
slug = slugify(entry.title) slug = slugify(entry.title)
kind = 'article' kind = 'article'
yield (entry.title, entry.description, slug, date, author, [], tags, None, yield (entry.title, entry.description, slug, date,
kind, "html") author, [], tags, None, kind, 'html')
def build_header(title, date, author, categories, tags, slug,
status=None, attachments=None):
"""Build a header from a list of fields"""
def build_header(title, date, author, categories, tags, slug, status=None, attachments=None):
from docutils.utils import column_width from docutils.utils import column_width
"""Build a header from a list of fields"""
header = '%s\n%s\n' % (title, '#' * column_width(title)) header = '%s\n%s\n' % (title, '#' * column_width(title))
if date: if date:
header += ':date: %s\n' % date header += ':date: %s\n' % date
@ -475,8 +532,9 @@ def build_header(title, date, author, categories, tags, slug, status=None, attac
header += '\n' header += '\n'
return header return header
def build_markdown_header(title, date, author, categories, tags, slug, status=None,
attachments=None): def build_markdown_header(title, date, author, categories, tags,
slug, status=None, attachments=None):
"""Build a header from a list of fields""" """Build a header from a list of fields"""
header = 'Title: %s\n' % title header = 'Title: %s\n' % title
if date: if date:
@ -496,6 +554,7 @@ def build_markdown_header(title, date, author, categories, tags, slug, status=No
header += '\n' header += '\n'
return header return header
def get_ext(out_markup, in_markup='html'): def get_ext(out_markup, in_markup='html'):
if in_markup == 'markdown' or out_markup == 'markdown': if in_markup == 'markdown' or out_markup == 'markdown':
ext = '.md' ext = '.md'
@ -503,26 +562,27 @@ def get_ext(out_markup, in_markup='html'):
ext = '.rst' ext = '.rst'
return ext return ext
def get_out_filename(output_path, filename, ext, kind, def get_out_filename(output_path, filename, ext, kind,
dirpage, dircat, categories, wp_custpost): dirpage, dircat, categories, wp_custpost):
filename = os.path.basename(filename) filename = os.path.basename(filename)
# Enforce filename restrictions for various filesystems at once; see # Enforce filename restrictions for various filesystems at once; see
# http://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words # http://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words
# we do not need to filter words because an extension will be appended # we do not need to filter words because an extension will be appended
filename = re.sub(r'[<>:"/\\|?*^% ]', '-', filename) # invalid chars filename = re.sub(r'[<>:"/\\|?*^% ]', '-', filename) # invalid chars
filename = filename.lstrip('.') # should not start with a dot filename = filename.lstrip('.') # should not start with a dot
if not filename: if not filename:
filename = '_' filename = '_'
filename = filename[:249] # allow for 5 extra characters filename = filename[:249] # allow for 5 extra characters
out_filename = os.path.join(output_path, filename+ext) out_filename = os.path.join(output_path, filename + ext)
# option to put page posts in pages/ subdirectory # option to put page posts in pages/ subdirectory
if dirpage and kind == 'page': if dirpage and kind == 'page':
pages_dir = os.path.join(output_path, 'pages') pages_dir = os.path.join(output_path, 'pages')
if not os.path.isdir(pages_dir): if not os.path.isdir(pages_dir):
os.mkdir(pages_dir) os.mkdir(pages_dir)
out_filename = os.path.join(pages_dir, filename+ext) out_filename = os.path.join(pages_dir, filename + ext)
elif not dirpage and kind == 'page': elif not dirpage and kind == 'page':
pass pass
# option to put wp custom post types in directories with post type # option to put wp custom post types in directories with post type
@ -539,18 +599,19 @@ def get_out_filename(output_path, filename, ext, kind,
else: else:
catname = '' catname = ''
out_filename = os.path.join(output_path, typename, out_filename = os.path.join(output_path, typename,
catname, filename+ext) catname, filename + ext)
if not os.path.isdir(os.path.join(output_path, typename, catname)): if not os.path.isdir(os.path.join(output_path, typename, catname)):
os.makedirs(os.path.join(output_path, typename, catname)) os.makedirs(os.path.join(output_path, typename, catname))
# option to put files in directories with categories names # option to put files in directories with categories names
elif dircat and (len(categories) > 0): elif dircat and (len(categories) > 0):
catname = slugify(categories[0]) catname = slugify(categories[0])
out_filename = os.path.join(output_path, catname, filename+ext) out_filename = os.path.join(output_path, catname, filename + ext)
if not os.path.isdir(os.path.join(output_path, catname)): if not os.path.isdir(os.path.join(output_path, catname)):
os.mkdir(os.path.join(output_path, catname)) os.mkdir(os.path.join(output_path, catname))
return out_filename return out_filename
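The sanitiser at the top of get_out_filename collapses every character some filesystem rejects into '-', refuses a leading dot, and truncates so five characters of extension still fit. For example:

    import re

    filename = '.what: a "post"?.html'
    filename = re.sub(r'[<>:"/\\|?*^% ]', '-', filename)  # invalid chars -> '-'
    filename = filename.lstrip('.')  # must not start with a dot
    filename = filename[:249]  # leave room for the extension
    print(filename)  # 'what--a--post--.html'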
def get_attachments(xml): def get_attachments(xml):
"""returns a dictionary of posts that have attachments with a list """returns a dictionary of posts that have attachments with a list
of the attachment_urls of the attachment_urls
@ -566,7 +627,7 @@ def get_attachments(xml):
if kind == 'attachment': if kind == 'attachment':
attachments.append((item.find('post_parent').string, attachments.append((item.find('post_parent').string,
item.find('attachment_url').string)) item.find('attachment_url').string))
else: else:
filename = get_filename(filename, post_id) filename = get_filename(filename, post_id)
names[post_id] = filename names[post_id] = filename
@ -575,7 +636,7 @@ def get_attachments(xml):
try: try:
parent_name = names[parent] parent_name = names[parent]
except KeyError: except KeyError:
#attachment's parent is not a valid post # attachment's parent is not a valid post
parent_name = None parent_name = None
try: try:
@ -585,6 +646,7 @@ def get_attachments(xml):
attachedposts[parent_name].append(url) attachedposts[parent_name].append(url)
return attachedposts return attachedposts
def download_attachments(output_path, urls): def download_attachments(output_path, urls):
"""Downloads WordPress attachments and returns a list of paths to """Downloads WordPress attachments and returns a list of paths to
attachments that can be associated with a post (relative path to output attachments that can be associated with a post (relative path to output
@ -592,8 +654,8 @@ def download_attachments(output_path, urls):
locations = [] locations = []
for url in urls: for url in urls:
path = urlparse(url).path path = urlparse(url).path
#teardown path and rebuild to negate any errors with # teardown path and rebuild to negate any errors with
#os.path.join and leading /'s # os.path.join and leading /'s
path = path.split('/') path = path.split('/')
filename = path.pop(-1) filename = path.pop(-1)
localpath = '' localpath = ''
@ -608,12 +670,13 @@ def download_attachments(output_path, urls):
urlretrieve(url, os.path.join(full_path, filename)) urlretrieve(url, os.path.join(full_path, filename))
locations.append(os.path.join(localpath, filename)) locations.append(os.path.join(localpath, filename))
except (URLError, IOError) as e: except (URLError, IOError) as e:
#Python 2.7 throws an IOError rather than URLError # Python 2.7 throws an IOError rather than URLError
logger.warning("No file could be downloaded from %s\n%s", url, e) logger.warning("No file could be downloaded from %s\n%s", url, e)
return locations return locations
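
The teardown-and-rebuild comment above is about a classic os.path.join pitfall: a component that starts with '/' throws away everything joined before it. Splitting the URL path and re-joining it piece by piece avoids that; a sketch with a hypothetical URL (the importer's exact rebuild loop falls outside this hunk):

    import os
    from six.moves.urllib.parse import urlparse

    url = 'http://example.com/wp-content/uploads/2015/06/photo.jpg'
    print(os.path.join('output', urlparse(url).path))
    # /wp-content/uploads/2015/06/photo.jpg -- the leading '/' wins

    parts = urlparse(url).path.split('/')  # ['', 'wp-content', ..., 'photo.jpg']
    filename = parts.pop(-1)
    localpath = ''
    for part in parts:
        if part:  # skip the empty component left by the leading '/'
            localpath = os.path.join(localpath, part)
    print(os.path.join('output', localpath, filename))
    # output/wp-content/uploads/2015/06/photo.jpg
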
def fields2pelican(fields, out_markup, output_path, def fields2pelican(
fields, out_markup, output_path,
dircat=False, strip_raw=False, disable_slugs=False, dircat=False, strip_raw=False, disable_slugs=False,
dirpage=False, filename_template=None, filter_author=None, dirpage=False, filename_template=None, filter_author=None,
wp_custpost=False, wp_attach=False, attachments=None): wp_custpost=False, wp_attach=False, attachments=None):
@ -634,24 +697,26 @@ def fields2pelican(fields, out_markup, output_path,
ext = get_ext(out_markup, in_markup) ext = get_ext(out_markup, in_markup)
if ext == '.md': if ext == '.md':
header = build_markdown_header(title, date, author, categories, header = build_markdown_header(
tags, slug, status, attached_files) title, date, author, categories, tags, slug,
status, attached_files)
else: else:
out_markup = "rst" out_markup = 'rst'
header = build_header(title, date, author, categories, header = build_header(title, date, author, categories,
tags, slug, status, attached_files) tags, slug, status, attached_files)
out_filename = get_out_filename(output_path, filename, ext, out_filename = get_out_filename(
kind, dirpage, dircat, categories, wp_custpost) output_path, filename, ext, kind, dirpage, dircat,
categories, wp_custpost)
print(out_filename) print(out_filename)
if in_markup in ("html", "wp-html"): if in_markup in ('html', 'wp-html'):
html_filename = os.path.join(output_path, filename+'.html') html_filename = os.path.join(output_path, filename + '.html')
with open(html_filename, 'w', encoding='utf-8') as fp: with open(html_filename, 'w', encoding='utf-8') as fp:
# Replace newlines with paragraphs wrapped with <p> so # Replace newlines with paragraphs wrapped with <p> so
# HTML is valid before conversion # HTML is valid before conversion
if in_markup == "wp-html": if in_markup == 'wp-html':
new_content = decode_wp_content(content) new_content = decode_wp_content(content)
else: else:
paragraphs = content.splitlines() paragraphs = content.splitlines()
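
For plain 'html' input the splitlines call above feeds a line-by-line <p> wrap, so pandoc receives valid HTML; the join itself is cut from this hunk, so this is only the simplest plausible version:

    content = 'first paragraph\nsecond paragraph'  # hypothetical input
    paragraphs = content.splitlines()
    new_content = ''.join('<p>{0}</p>'.format(p) for p in paragraphs)
    print(new_content)
    # <p>first paragraph</p><p>second paragraph</p>
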
@ -660,79 +725,95 @@ def fields2pelican(fields, out_markup, output_path,
fp.write(new_content) fp.write(new_content)
parse_raw = '--parse-raw' if not strip_raw else '' parse_raw = '--parse-raw' if not strip_raw else ''
cmd = ('pandoc --normalize {0} --from=html' cmd = ('pandoc --normalize {0} --from=html'
' --to={1} -o "{2}" "{3}"').format( ' --to={1} -o "{2}" "{3}"')
parse_raw, out_markup, out_filename, html_filename) cmd = cmd.format(parse_raw, out_markup,
out_filename, html_filename)
try: try:
rc = subprocess.call(cmd, shell=True) rc = subprocess.call(cmd, shell=True)
if rc < 0: if rc < 0:
error = "Child was terminated by signal %d" % -rc error = 'Child was terminated by signal %d' % -rc
exit(error) exit(error)
elif rc > 0: elif rc > 0:
error = "Please, check your Pandoc installation." error = 'Please, check your Pandoc installation.'
exit(error) exit(error)
except OSError as e: except OSError as e:
error = "Pandoc execution failed: %s" % e error = 'Pandoc execution failed: %s' % e
exit(error) exit(error)
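
The rc checks above follow subprocess.call's return convention: 0 on success, the child's positive exit status on failure, and a negative value -N when signal N terminated it. In isolation:

    import subprocess

    rc = subprocess.call('pandoc --version', shell=True)
    if rc < 0:
        print('Child was terminated by signal %d' % -rc)
    elif rc > 0:
        # e.g. 127 from the shell when pandoc is not installed
        print('pandoc exited with status %d' % rc)
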
os.remove(html_filename) os.remove(html_filename)
with open(out_filename, 'r', encoding='utf-8') as fs: with open(out_filename, 'r', encoding='utf-8') as fs:
content = fs.read() content = fs.read()
if out_markup == "markdown": if out_markup == 'markdown':
            # In markdown, to insert a <br />, end a line with two or more spaces & then an end-of-line             # In markdown, to insert a <br />, end a line with two
            content = content.replace("\\\n ", "  \n")             # or more spaces & then an end-of-line
            content = content.replace("\\\n", "  \n")             content = content.replace('\\\n ', '  \n')
            content = content.replace('\\\n', '  \n')
with open(out_filename, 'w', encoding='utf-8') as fs: with open(out_filename, 'w', encoding='utf-8') as fs:
fs.write(header + content) fs.write(header + content)
if wp_attach and attachments and None in attachments: if wp_attach and attachments and None in attachments:
print("downloading attachments that don't have a parent post") print("downloading attachments that don't have a parent post")
urls = attachments[None] urls = attachments[None]
orphan_galleries = download_attachments(output_path, urls) download_attachments(output_path, urls)
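
The pair of replace calls near the end translate pandoc's hard line breaks (a trailing backslash before the newline) into Markdown's form, a line ending in two spaces. In isolation:

    content = 'line one\\\n line two'            # hypothetical pandoc output
    content = content.replace('\\\n ', '  \n')   # two trailing spaces = <br />
    content = content.replace('\\\n', '  \n')
    print(repr(content))
    # 'line one  \nline two'
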
def main(): def main():
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description="Transform feed, WordPress, Tumblr, Dotclear, or Posterous " description="Transform feed, WordPress, Tumblr, Dotclear, or "
"files into reST (rst) or Markdown (md) files. Be sure to " "Posterous files into reST (rst) or Markdown (md) files. "
"have pandoc installed.", "Be sure to have pandoc installed.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter) formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(dest='input', help='The input file to read') parser.add_argument(
parser.add_argument('--wpfile', action='store_true', dest='wpfile', dest='input', help='The input file to read')
parser.add_argument(
'--wpfile', action='store_true', dest='wpfile',
help='Wordpress XML export') help='Wordpress XML export')
parser.add_argument('--dotclear', action='store_true', dest='dotclear', parser.add_argument(
'--dotclear', action='store_true', dest='dotclear',
help='Dotclear export') help='Dotclear export')
parser.add_argument('--posterous', action='store_true', dest='posterous', parser.add_argument(
'--posterous', action='store_true', dest='posterous',
help='Posterous export') help='Posterous export')
parser.add_argument('--tumblr', action='store_true', dest='tumblr', parser.add_argument(
'--tumblr', action='store_true', dest='tumblr',
help='Tumblr export') help='Tumblr export')
parser.add_argument('--feed', action='store_true', dest='feed', parser.add_argument(
'--feed', action='store_true', dest='feed',
help='Feed to parse') help='Feed to parse')
parser.add_argument('-o', '--output', dest='output', default='output', parser.add_argument(
'-o', '--output', dest='output', default='output',
help='Output path') help='Output path')
parser.add_argument('-m', '--markup', dest='markup', default='rst', parser.add_argument(
'-m', '--markup', dest='markup', default='rst',
help='Output markup format (supports rst & markdown)') help='Output markup format (supports rst & markdown)')
parser.add_argument('--dir-cat', action='store_true', dest='dircat', parser.add_argument(
'--dir-cat', action='store_true', dest='dircat',
help='Put files in directories with categories name') help='Put files in directories with categories name')
parser.add_argument('--dir-page', action='store_true', dest='dirpage', parser.add_argument(
'--dir-page', action='store_true', dest='dirpage',
help=('Put files recognised as pages in "pages/" sub-directory' help=('Put files recognised as pages in "pages/" sub-directory'
' (wordpress import only)')) ' (wordpress import only)'))
parser.add_argument('--filter-author', dest='author', parser.add_argument(
'--filter-author', dest='author',
help='Import only post from the specified author') help='Import only post from the specified author')
parser.add_argument('--strip-raw', action='store_true', dest='strip_raw', parser.add_argument(
'--strip-raw', action='store_true', dest='strip_raw',
help="Strip raw HTML code that can't be converted to " help="Strip raw HTML code that can't be converted to "
"markup such as flash embeds or iframes (wordpress import only)") "markup such as flash embeds or iframes (wordpress import only)")
parser.add_argument('--wp-custpost', action='store_true', parser.add_argument(
'--wp-custpost', action='store_true',
dest='wp_custpost', dest='wp_custpost',
help='Put wordpress custom post types in directories. If used with ' help='Put wordpress custom post types in directories. If used with '
'--dir-cat option directories will be created as ' '--dir-cat option directories will be created as '
'/post_type/category/ (wordpress import only)') '/post_type/category/ (wordpress import only)')
parser.add_argument('--wp-attach', action='store_true', dest='wp_attach', parser.add_argument(
'--wp-attach', action='store_true', dest='wp_attach',
help='(wordpress import only) Download files uploaded to wordpress as ' help='(wordpress import only) Download files uploaded to wordpress as '
'attachments. Files will be added to posts as a list in the post ' 'attachments. Files will be added to posts as a list in the post '
'header. All files will be downloaded, even if ' 'header. All files will be downloaded, even if '
@ -740,16 +821,20 @@ def main():
'with their original path inside the output directory. ' 'with their original path inside the output directory. '
'e.g. output/wp-uploads/date/postname/file.jpg ' 'e.g. output/wp-uploads/date/postname/file.jpg '
'-- Requires an internet connection --') '-- Requires an internet connection --')
parser.add_argument('--disable-slugs', action='store_true', parser.add_argument(
'--disable-slugs', action='store_true',
dest='disable_slugs', dest='disable_slugs',
help='Disable storing slugs from imported posts within output. ' help='Disable storing slugs from imported posts within output. '
'With this disabled, your Pelican URLs may not be consistent ' 'With this disabled, your Pelican URLs may not be consistent '
'with your original posts.') 'with your original posts.')
parser.add_argument('-e', '--email', dest='email', parser.add_argument(
'-e', '--email', dest='email',
help="Email address (posterous import only)") help="Email address (posterous import only)")
parser.add_argument('-p', '--password', dest='password', parser.add_argument(
'-p', '--password', dest='password',
help="Password (posterous import only)") help="Password (posterous import only)")
parser.add_argument('-b', '--blogname', dest='blogname', parser.add_argument(
'-b', '--blogname', dest='blogname',
help="Blog name (Tumblr import only)") help="Blog name (Tumblr import only)")
args = parser.parse_args() args = parser.parse_args()
@ -766,18 +851,20 @@ def main():
elif args.feed: elif args.feed:
input_type = 'feed' input_type = 'feed'
else: else:
error = "You must provide either --wpfile, --dotclear, --posterous, --tumblr or --feed options" error = ('You must provide either --wpfile, --dotclear, '
'--posterous, --tumblr or --feed options')
exit(error) exit(error)
if not os.path.exists(args.output): if not os.path.exists(args.output):
try: try:
os.mkdir(args.output) os.mkdir(args.output)
except OSError: except OSError:
error = "Unable to create the output folder: " + args.output error = 'Unable to create the output folder: ' + args.output
exit(error) exit(error)
if args.wp_attach and input_type != 'wordpress': if args.wp_attach and input_type != 'wordpress':
error = "You must be importing a wordpress xml to use the --wp-attach option" error = ('You must be importing a wordpress xml '
'to use the --wp-attach option')
exit(error) exit(error)
if input_type == 'wordpress': if input_type == 'wordpress':
@ -796,14 +883,14 @@ def main():
else: else:
attachments = None attachments = None
init() # init logging # init logging
init()
fields2pelican(fields, args.markup, args.output, fields2pelican(fields, args.markup, args.output,
dircat=args.dircat or False, dircat=args.dircat or False,
dirpage=args.dirpage or False, dirpage=args.dirpage or False,
strip_raw=args.strip_raw or False, strip_raw=args.strip_raw or False,
disable_slugs=args.disable_slugs or False, disable_slugs=args.disable_slugs or False,
filter_author=args.author, filter_author=args.author,
wp_custpost = args.wp_custpost or False, wp_custpost=args.wp_custpost or False,
wp_attach = args.wp_attach or False, wp_attach=args.wp_attach or False,
attachments = attachments or None) attachments=attachments or None)
View file
@ -1,18 +1,20 @@
#!/usr/bin/env python #!/usr/bin/env python
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function from __future__ import print_function, unicode_literals
import six
import argparse
import codecs
import os import os
import string import string
import argparse
import sys import sys
import codecs
import pytz import pytz
import six
from pelican import __version__ from pelican import __version__
_TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), _TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"templates") "templates")
@ -44,9 +46,10 @@ CONF = {
'timezone': 'Europe/Paris' 'timezone': 'Europe/Paris'
} }
#url for list of valid timezones # url for list of valid timezones
_TZ_URL = 'http://en.wikipedia.org/wiki/List_of_tz_database_time_zones' _TZ_URL = 'http://en.wikipedia.org/wiki/List_of_tz_database_time_zones'
def _input_compat(prompt): def _input_compat(prompt):
if six.PY3: if six.PY3:
r = input(prompt) r = input(prompt)
@ -59,6 +62,7 @@ if six.PY3:
else: else:
str_compat = unicode str_compat = unicode
# Create a 'marked' default path, to determine if someone has supplied # Create a 'marked' default path, to determine if someone has supplied
# a path on the command-line. # a path on the command-line.
class _DEFAULT_PATH_TYPE(str_compat): class _DEFAULT_PATH_TYPE(str_compat):
@ -66,6 +70,7 @@ class _DEFAULT_PATH_TYPE(str_compat):
_DEFAULT_PATH = _DEFAULT_PATH_TYPE(os.curdir) _DEFAULT_PATH = _DEFAULT_PATH_TYPE(os.curdir)
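
The 'marked' default path is a small sentinel trick: argparse returns the default object untouched, so a str subclass lets later code tell 'the user typed os.curdir' apart from 'the user typed nothing'. A sketch of the idea on plain str (the real code builds on str_compat for Python 2, and the class body is cut from this hunk):

    class _DEFAULT_PATH_TYPE(str):
        is_default_path = True              # assumed marker attribute

    _DEFAULT_PATH = _DEFAULT_PATH_TYPE('.')

    def came_from_user(path):               # hypothetical helper
        return not isinstance(path, _DEFAULT_PATH_TYPE)

    print(came_from_user(_DEFAULT_PATH))    # False
    print(came_from_user('.'))              # True: same text, typed by the user
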
def decoding_strings(f): def decoding_strings(f):
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
out = f(*args, **kwargs) out = f(*args, **kwargs)
@ -164,7 +169,8 @@ def ask(question, answer=str_compat, default=None, l=None):
print('You must enter an integer') print('You must enter an integer')
return r return r
else: else:
raise NotImplemented('Argument `answer` must be str_compat, bool, or integer') raise NotImplemented(
'Argument `answer` must be str_compat, bool, or integer')
def ask_timezone(question, default, tzurl): def ask_timezone(question, default, tzurl):
@ -177,7 +183,8 @@ def ask_timezone(question, default, tzurl):
r = pytz.all_timezones[lower_tz.index(r)] r = pytz.all_timezones[lower_tz.index(r)]
break break
else: else:
print('Please enter a valid time zone:\n (check [{0}])'.format(tzurl)) print('Please enter a valid time zone:\n'
' (check [{0}])'.format(tzurl))
return r return r
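
The lookup in ask_timezone is a case-insensitive match against pytz's canonical zone names: lower-case both sides, find the index, hand back the properly cased name. Standalone (the construction of lower_tz sits outside this hunk):

    import pytz

    lower_tz = [tz.lower() for tz in pytz.all_timezones]
    r = 'europe/paris'                      # hypothetical user input
    if r.lower() in lower_tz:
        r = pytz.all_timezones[lower_tz.index(r.lower())]
    print(r)                                # Europe/Paris
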
@ -186,13 +193,13 @@ def main():
description="A kickstarter for Pelican", description="A kickstarter for Pelican",
formatter_class=argparse.ArgumentDefaultsHelpFormatter) formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-p', '--path', default=_DEFAULT_PATH, parser.add_argument('-p', '--path', default=_DEFAULT_PATH,
help="The path to generate the blog into") help="The path to generate the blog into")
parser.add_argument('-t', '--title', metavar="title", parser.add_argument('-t', '--title', metavar="title",
help='Set the title of the website') help='Set the title of the website')
parser.add_argument('-a', '--author', metavar="author", parser.add_argument('-a', '--author', metavar="author",
help='Set the author name of the website') help='Set the author name of the website')
parser.add_argument('-l', '--lang', metavar="lang", parser.add_argument('-l', '--lang', metavar="lang",
help='Set the default web site language') help='Set the default web site language')
args = parser.parse_args() args = parser.parse_args()
@ -214,50 +221,94 @@ needed by Pelican.
'Will save to:\n%s\n' % CONF['basedir']) 'Will save to:\n%s\n' % CONF['basedir'])
else: else:
CONF['basedir'] = os.path.abspath(os.path.expanduser( CONF['basedir'] = os.path.abspath(os.path.expanduser(
ask('Where do you want to create your new web site?', answer=str_compat, default=args.path))) ask('Where do you want to create your new web site?',
answer=str_compat, default=args.path)))
CONF['sitename'] = ask('What will be the title of this web site?', answer=str_compat, default=args.title) CONF['sitename'] = ask('What will be the title of this web site?',
CONF['author'] = ask('Who will be the author of this web site?', answer=str_compat, default=args.author) answer=str_compat, default=args.title)
CONF['lang'] = ask('What will be the default language of this web site?', str_compat, args.lang or CONF['lang'], 2) CONF['author'] = ask('Who will be the author of this web site?',
answer=str_compat, default=args.author)
CONF['lang'] = ask('What will be the default language of this web site?',
str_compat, args.lang or CONF['lang'], 2)
if ask('Do you want to specify a URL prefix? e.g., http://example.com ', answer=bool, default=True): if ask('Do you want to specify a URL prefix? e.g., http://example.com ',
CONF['siteurl'] = ask('What is your URL prefix? (see above example; no trailing slash)', str_compat, CONF['siteurl']) answer=bool, default=True):
CONF['siteurl'] = ask('What is your URL prefix? (see '
'above example; no trailing slash)',
str_compat, CONF['siteurl'])
CONF['with_pagination'] = ask('Do you want to enable article pagination?', bool, bool(CONF['default_pagination'])) CONF['with_pagination'] = ask('Do you want to enable article pagination?',
bool, bool(CONF['default_pagination']))
if CONF['with_pagination']: if CONF['with_pagination']:
CONF['default_pagination'] = ask('How many articles per page do you want?', int, CONF['default_pagination']) CONF['default_pagination'] = ask('How many articles per page '
'do you want?',
int, CONF['default_pagination'])
else: else:
CONF['default_pagination'] = False CONF['default_pagination'] = False
CONF['timezone'] = ask_timezone('What is your time zone?', CONF['timezone'], _TZ_URL) CONF['timezone'] = ask_timezone('What is your time zone?',
CONF['timezone'], _TZ_URL)
automation = ask('Do you want to generate a Fabfile/Makefile to automate generation and publishing?', bool, True) automation = ask('Do you want to generate a Fabfile/Makefile '
develop = ask('Do you want an auto-reload & simpleHTTP script to assist with theme and site development?', bool, True) 'to automate generation and publishing?', bool, True)
develop = ask('Do you want an auto-reload & simpleHTTP script '
'to assist with theme and site development?', bool, True)
if automation: if automation:
if ask('Do you want to upload your website using FTP?', answer=bool, default=False): if ask('Do you want to upload your website using FTP?',
CONF['ftp_host'] = ask('What is the hostname of your FTP server?', str_compat, CONF['ftp_host']) answer=bool, default=False):
CONF['ftp_user'] = ask('What is your username on that server?', str_compat, CONF['ftp_user']) CONF['ftp_host'] = ask('What is the hostname of your FTP server?',
CONF['ftp_target_dir'] = ask('Where do you want to put your web site on that server?', str_compat, CONF['ftp_target_dir']) str_compat, CONF['ftp_host'])
if ask('Do you want to upload your website using SSH?', answer=bool, default=False): CONF['ftp_user'] = ask('What is your username on that server?',
CONF['ssh_host'] = ask('What is the hostname of your SSH server?', str_compat, CONF['ssh_host']) str_compat, CONF['ftp_user'])
CONF['ssh_port'] = ask('What is the port of your SSH server?', int, CONF['ssh_port']) CONF['ftp_target_dir'] = ask('Where do you want to put your '
CONF['ssh_user'] = ask('What is your username on that server?', str_compat, CONF['ssh_user']) 'web site on that server?',
CONF['ssh_target_dir'] = ask('Where do you want to put your web site on that server?', str_compat, CONF['ssh_target_dir']) str_compat, CONF['ftp_target_dir'])
if ask('Do you want to upload your website using Dropbox?', answer=bool, default=False): if ask('Do you want to upload your website using SSH?',
CONF['dropbox_dir'] = ask('Where is your Dropbox directory?', str_compat, CONF['dropbox_dir']) answer=bool, default=False):
if ask('Do you want to upload your website using S3?', answer=bool, default=False): CONF['ssh_host'] = ask('What is the hostname of your SSH server?',
CONF['s3_bucket'] = ask('What is the name of your S3 bucket?', str_compat, CONF['s3_bucket']) str_compat, CONF['ssh_host'])
if ask('Do you want to upload your website using Rackspace Cloud Files?', answer=bool, default=False): CONF['ssh_port'] = ask('What is the port of your SSH server?',
CONF['cloudfiles_username'] = ask('What is your Rackspace Cloud username?', str_compat, CONF['cloudfiles_username']) int, CONF['ssh_port'])
CONF['cloudfiles_api_key'] = ask('What is your Rackspace Cloud API key?', str_compat, CONF['cloudfiles_api_key']) CONF['ssh_user'] = ask('What is your username on that server?',
CONF['cloudfiles_container'] = ask('What is the name of your Cloud Files container?', str_compat, CONF['cloudfiles_container']) str_compat, CONF['ssh_user'])
if ask('Do you want to upload your website using GitHub Pages?', answer=bool, default=False): CONF['ssh_target_dir'] = ask('Where do you want to put your '
if ask('Is this your personal page (username.github.io)?', answer=bool, default=False): 'web site on that server?',
CONF['github_pages_branch'] = _GITHUB_PAGES_BRANCHES['personal'] str_compat, CONF['ssh_target_dir'])
if ask('Do you want to upload your website using Dropbox?',
answer=bool, default=False):
CONF['dropbox_dir'] = ask('Where is your Dropbox directory?',
str_compat, CONF['dropbox_dir'])
if ask('Do you want to upload your website using S3?',
answer=bool, default=False):
CONF['s3_bucket'] = ask('What is the name of your S3 bucket?',
str_compat, CONF['s3_bucket'])
if ask('Do you want to upload your website using '
'Rackspace Cloud Files?', answer=bool, default=False):
CONF['cloudfiles_username'] = ask('What is your Rackspace '
'Cloud username?', str_compat,
CONF['cloudfiles_username'])
CONF['cloudfiles_api_key'] = ask('What is your Rackspace '
'Cloud API key?', str_compat,
CONF['cloudfiles_api_key'])
CONF['cloudfiles_container'] = ask('What is the name of your '
'Cloud Files container?',
str_compat,
CONF['cloudfiles_container'])
if ask('Do you want to upload your website using GitHub Pages?',
answer=bool, default=False):
if ask('Is this your personal page (username.github.io)?',
answer=bool, default=False):
CONF['github_pages_branch'] = \
_GITHUB_PAGES_BRANCHES['personal']
else: else:
CONF['github_pages_branch'] = _GITHUB_PAGES_BRANCHES['project'] CONF['github_pages_branch'] = \
_GITHUB_PAGES_BRANCHES['project']
try: try:
os.makedirs(os.path.join(CONF['basedir'], 'content')) os.makedirs(os.path.join(CONF['basedir'], 'content'))
@ -270,7 +321,8 @@ needed by Pelican.
print('Error: {0}'.format(e)) print('Error: {0}'.format(e))
try: try:
with codecs.open(os.path.join(CONF['basedir'], 'pelicanconf.py'), 'w', 'utf-8') as fd: with codecs.open(os.path.join(CONF['basedir'], 'pelicanconf.py'),
'w', 'utf-8') as fd:
conf_python = dict() conf_python = dict()
for key, value in CONF.items(): for key, value in CONF.items():
conf_python[key] = repr(value) conf_python[key] = repr(value)
@ -283,7 +335,8 @@ needed by Pelican.
print('Error: {0}'.format(e)) print('Error: {0}'.format(e))
try: try:
with codecs.open(os.path.join(CONF['basedir'], 'publishconf.py'), 'w', 'utf-8') as fd: with codecs.open(os.path.join(CONF['basedir'], 'publishconf.py'),
'w', 'utf-8') as fd:
for line in get_template('publishconf.py'): for line in get_template('publishconf.py'):
template = string.Template(line) template = string.Template(line)
fd.write(template.safe_substitute(CONF)) fd.write(template.safe_substitute(CONF))
@ -293,7 +346,8 @@ needed by Pelican.
if automation: if automation:
try: try:
with codecs.open(os.path.join(CONF['basedir'], 'fabfile.py'), 'w', 'utf-8') as fd: with codecs.open(os.path.join(CONF['basedir'], 'fabfile.py'),
'w', 'utf-8') as fd:
for line in get_template('fabfile.py'): for line in get_template('fabfile.py'):
template = string.Template(line) template = string.Template(line)
fd.write(template.safe_substitute(CONF)) fd.write(template.safe_substitute(CONF))
@ -301,7 +355,8 @@ needed by Pelican.
except OSError as e: except OSError as e:
print('Error: {0}'.format(e)) print('Error: {0}'.format(e))
try: try:
with codecs.open(os.path.join(CONF['basedir'], 'Makefile'), 'w', 'utf-8') as fd: with codecs.open(os.path.join(CONF['basedir'], 'Makefile'),
'w', 'utf-8') as fd:
mkfile_template_name = 'Makefile' mkfile_template_name = 'Makefile'
py_v = 'PY?=python' py_v = 'PY?=python'
if six.PY3: if six.PY3:
@ -323,7 +378,9 @@ needed by Pelican.
value = '"' + value.replace('"', '\\"') + '"' value = '"' + value.replace('"', '\\"') + '"'
conf_shell[key] = value conf_shell[key] = value
try: try:
with codecs.open(os.path.join(CONF['basedir'], 'develop_server.sh'), 'w', 'utf-8') as fd: with codecs.open(os.path.join(CONF['basedir'],
'develop_server.sh'),
'w', 'utf-8') as fd:
lines = list(get_template('develop_server.sh')) lines = list(get_template('develop_server.sh'))
py_v = 'PY=${PY:-python}\n' py_v = 'PY=${PY:-python}\n'
if six.PY3: if six.PY3:
@ -333,7 +390,10 @@ needed by Pelican.
template = string.Template(line) template = string.Template(line)
fd.write(template.safe_substitute(conf_shell)) fd.write(template.safe_substitute(conf_shell))
fd.close() fd.close()
os.chmod((os.path.join(CONF['basedir'], 'develop_server.sh')), 493) # mode 0o755
# mode 0o755
os.chmod((os.path.join(CONF['basedir'],
'develop_server.sh')), 493)
except OSError as e: except OSError as e:
print('Error: {0}'.format(e)) print('Error: {0}'.format(e))
View file
@ -1,33 +1,12 @@
#!/usr/bin/env python #!/usr/bin/env python
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function from __future__ import print_function, unicode_literals
import six
import argparse import argparse
import os import os
import shutil import shutil
import sys import sys
try:
import pelican
except:
err('Cannot import pelican.\nYou must install Pelican in order to run this script.', -1)
global _THEMES_PATH
_THEMES_PATH = os.path.join(
os.path.dirname(
os.path.abspath(
pelican.__file__
)
),
'themes'
)
__version__ = '0.2'
_BUILTIN_THEMES = ['simple', 'notmyidea']
def err(msg, die=None): def err(msg, die=None):
"""Print an error message and exits if an exit code is given""" """Print an error message and exits if an exit code is given"""
@ -35,43 +14,71 @@ def err(msg, die=None):
if die: if die:
sys.exit((die if type(die) is int else 1)) sys.exit((die if type(die) is int else 1))
try:
import pelican
except:
err('Cannot import pelican.\nYou must '
'install Pelican in order to run this script.',
-1)
global _THEMES_PATH
_THEMES_PATH = os.path.join(
os.path.dirname(
os.path.abspath(pelican.__file__)
),
'themes'
)
__version__ = '0.2'
_BUILTIN_THEMES = ['simple', 'notmyidea']
def main(): def main():
"""Main function""" """Main function"""
parser = argparse.ArgumentParser(description="""Install themes for Pelican""") parser = argparse.ArgumentParser(
description="""Install themes for Pelican""")
excl= parser.add_mutually_exclusive_group() excl = parser.add_mutually_exclusive_group()
excl.add_argument('-l', '--list', dest='action', action="store_const", const='list', excl.add_argument(
'-l', '--list', dest='action', action="store_const", const='list',
help="Show the themes already installed and exit") help="Show the themes already installed and exit")
excl.add_argument('-p', '--path', dest='action', action="store_const", const='path', excl.add_argument(
'-p', '--path', dest='action', action="store_const", const='path',
help="Show the themes path and exit") help="Show the themes path and exit")
excl.add_argument('-V', '--version', action='version', version='pelican-themes v{0}'.format(__version__), excl.add_argument(
'-V', '--version', action='version',
version='pelican-themes v{0}'.format(__version__),
help='Print the version of this script') help='Print the version of this script')
parser.add_argument(
parser.add_argument('-i', '--install', dest='to_install', nargs='+', metavar="theme path", '-i', '--install', dest='to_install', nargs='+', metavar="theme path",
help='The themes to install') help='The themes to install')
parser.add_argument('-r', '--remove', dest='to_remove', nargs='+', metavar="theme name", parser.add_argument(
'-r', '--remove', dest='to_remove', nargs='+', metavar="theme name",
help='The themes to remove') help='The themes to remove')
parser.add_argument('-U', '--upgrade', dest='to_upgrade', nargs='+', parser.add_argument(
metavar="theme path", help='The themes to upgrade') '-U', '--upgrade', dest='to_upgrade', nargs='+',
parser.add_argument('-s', '--symlink', dest='to_symlink', nargs='+', metavar="theme path", metavar="theme path", help='The themes to upgrade')
help="Same as `--install', but create a symbolic link instead of copying the theme. Useful for theme development") parser.add_argument(
parser.add_argument('-c', '--clean', dest='clean', action="store_true", '-s', '--symlink', dest='to_symlink', nargs='+', metavar="theme path",
help="Same as `--install', but create a symbolic link instead of "
"copying the theme. Useful for theme development")
parser.add_argument(
'-c', '--clean', dest='clean', action="store_true",
help="Remove the broken symbolic links of the theme path") help="Remove the broken symbolic links of the theme path")
parser.add_argument(
parser.add_argument('-v', '--verbose', dest='verbose', action="store_true", '-v', '--verbose', dest='verbose',
action="store_true",
help="Verbose output") help="Verbose output")
args = parser.parse_args() args = parser.parse_args()
to_install = args.to_install or args.to_upgrade to_install = args.to_install or args.to_upgrade
to_sym = args.to_symlink or args.clean to_sym = args.to_symlink or args.clean
if args.action: if args.action:
if args.action is 'list': if args.action is 'list':
list_themes(args.verbose) list_themes(args.verbose)
@ -95,7 +102,7 @@ def main():
if args.to_upgrade: if args.to_upgrade:
if args.verbose: if args.verbose:
print('Upgrading themes...') print('Upgrading themes...')
for i in args.to_upgrade: for i in args.to_upgrade:
install(i, v=args.verbose, u=True) install(i, v=args.verbose, u=True)
@ -144,11 +151,13 @@ def list_themes(v=False):
def remove(theme_name, v=False): def remove(theme_name, v=False):
"""Removes a theme""" """Removes a theme"""
theme_name = theme_name.replace('/','') theme_name = theme_name.replace('/', '')
target = os.path.join(_THEMES_PATH, theme_name) target = os.path.join(_THEMES_PATH, theme_name)
if theme_name in _BUILTIN_THEMES: if theme_name in _BUILTIN_THEMES:
err(theme_name + ' is a builtin theme.\nYou cannot remove a builtin theme with this script, remove it by hand if you want.') err(theme_name + ' is a builtin theme.\n'
'You cannot remove a builtin theme with this script, '
'remove it by hand if you want.')
elif os.path.islink(target): elif os.path.islink(target):
if v: if v:
print('Removing link `' + target + "'") print('Removing link `' + target + "'")
@ -180,7 +189,8 @@ def install(path, v=False, u=False):
install(path, v) install(path, v)
else: else:
if v: if v:
print("Copying `{p}' to `{t}' ...".format(p=path, t=theme_path)) print("Copying '{p}' to '{t}' ...".format(p=path,
t=theme_path))
try: try:
shutil.copytree(path, theme_path) shutil.copytree(path, theme_path)
@ -189,14 +199,18 @@ def install(path, v=False, u=False):
for root, dirs, files in os.walk(theme_path): for root, dirs, files in os.walk(theme_path):
for d in dirs: for d in dirs:
dname = os.path.join(root, d) dname = os.path.join(root, d)
os.chmod(dname, 493) # 0o755 os.chmod(dname, 493) # 0o755
for f in files: for f in files:
fname = os.path.join(root, f) fname = os.path.join(root, f)
os.chmod(fname, 420) # 0o644 os.chmod(fname, 420) # 0o644
except OSError as e: except OSError as e:
err("Cannot change permissions of files or directory in `{r}':\n{e}".format(r=theme_path, e=str(e)), die=False) err("Cannot change permissions of files "
"or directory in `{r}':\n{e}".format(r=theme_path,
e=str(e)),
die=False)
except Exception as e: except Exception as e:
err("Cannot copy `{p}' to `{t}':\n{e}".format(p=path, t=theme_path, e=str(e))) err("Cannot copy `{p}' to `{t}':\n{e}".format(
p=path, t=theme_path, e=str(e)))
def symlink(path, v=False): def symlink(path, v=False):
@ -212,11 +226,13 @@ def symlink(path, v=False):
err(path + ' : already exists') err(path + ' : already exists')
else: else:
if v: if v:
print("Linking `{p}' to `{t}' ...".format(p=path, t=theme_path)) print("Linking `{p}' to `{t}' ...".format(
p=path, t=theme_path))
try: try:
os.symlink(path, theme_path) os.symlink(path, theme_path)
except Exception as e: except Exception as e:
err("Cannot link `{p}' to `{t}':\n{e}".format(p=path, t=theme_path, e=str(e))) err("Cannot link `{p}' to `{t}':\n{e}".format(
p=path, t=theme_path, e=str(e)))
def is_broken_link(path): def is_broken_link(path):
@ -227,7 +243,7 @@ def is_broken_link(path):
def clean(v=False): def clean(v=False):
"""Removes the broken symbolic links""" """Removes the broken symbolic links"""
c=0 c = 0
for path in os.listdir(_THEMES_PATH): for path in os.listdir(_THEMES_PATH):
path = os.path.join(_THEMES_PATH, path) path = os.path.join(_THEMES_PATH, path)
if os.path.islink(path): if os.path.islink(path):
@ -236,9 +252,9 @@ def clean(v=False):
print('Removing {0}'.format(path)) print('Removing {0}'.format(path))
try: try:
os.remove(path) os.remove(path)
except OSError as e: except OSError:
print('Error: cannot remove {0}'.format(path)) print('Error: cannot remove {0}'.format(path))
else: else:
c+=1 c += 1
print("\nRemoved {0} broken links".format(c)) print("\nRemoved {0} broken links".format(c))
View file
@ -4,9 +4,10 @@ from __future__ import unicode_literals
import functools import functools
import logging import logging
import os import os
import six import six
from pelican.utils import (slugify, python_2_unicode_compatible) from pelican.utils import python_2_unicode_compatible, slugify
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
View file
@ -1,29 +1,30 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function from __future__ import print_function, unicode_literals
import six
import codecs import codecs
import datetime
import errno import errno
import fnmatch import fnmatch
import locale import locale
import logging import logging
import os import os
import pytz
import re import re
import shutil import shutil
import sys import sys
import traceback import traceback
import pickle
import datetime
from collections import Hashable from collections import Hashable
from contextlib import contextmanager from contextlib import contextmanager
import dateutil.parser
from functools import partial from functools import partial
from itertools import groupby from itertools import groupby
from jinja2 import Markup
from operator import attrgetter from operator import attrgetter
from posixpath import join as posix_join
import dateutil.parser
from jinja2 import Markup
import pytz
import six
from six.moves.html_parser import HTMLParser from six.moves.html_parser import HTMLParser
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -43,9 +44,9 @@ def strftime(date, date_format):
formatting them with the date, (if necessary) decoding the output and formatting them with the date, (if necessary) decoding the output and
replacing formatted output back. replacing formatted output back.
''' '''
def strip_zeros(x):
return x.lstrip('0') or '0'
c89_directives = 'aAbBcdfHIjmMpSUwWxXyYzZ%' c89_directives = 'aAbBcdfHIjmMpSUwWxXyYzZ%'
strip_zeros = lambda x: x.lstrip('0') or '0'
# grab candidate format options # grab candidate format options
format_options = '%[-]?.' format_options = '%[-]?.'
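
The lambda-to-def change above is pure style, but strip_zeros itself deserves a note: the `or '0'` guard keeps an all-zero field from being stripped to an empty string:

    def strip_zeros(x):
        return x.lstrip('0') or '0'

    print(strip_zeros('09'))  # '9' -- what a zero-padded %d becomes
    print(strip_zeros('00'))  # '0' -- without the guard this would be ''
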
@ -200,8 +201,8 @@ def deprecated_attribute(old, new, since=None, remove=None, doc=None):
' and will be removed by version {}'.format(version)) ' and will be removed by version {}'.format(version))
message.append('. Use {} instead.'.format(new)) message.append('. Use {} instead.'.format(new))
logger.warning(''.join(message)) logger.warning(''.join(message))
logger.debug(''.join( logger.debug(''.join(six.text_type(x) for x
six.text_type(x) for x in traceback.format_stack())) in traceback.format_stack()))
def fget(self): def fget(self):
_warn() _warn()
@ -224,7 +225,7 @@ def get_date(string):
""" """
string = re.sub(' +', ' ', string) string = re.sub(' +', ' ', string)
default = SafeDatetime.now().replace(hour=0, minute=0, default = SafeDatetime.now().replace(hour=0, minute=0,
second=0, microsecond=0) second=0, microsecond=0)
try: try:
return dateutil.parser.parse(string, default=default) return dateutil.parser.parse(string, default=default)
except (TypeError, ValueError): except (TypeError, ValueError):
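
The default handed to dateutil above is today's date at midnight, which is how partial strings come out whole: any field the input omits is filled in from the default. With a plain datetime standing in for pelican's SafeDatetime:

    import datetime
    import dateutil.parser

    default = datetime.datetime.now().replace(hour=0, minute=0,
                                              second=0, microsecond=0)
    print(dateutil.parser.parse('2015-06-16', default=default))
    # 2015-06-16 00:00:00
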
@ -319,12 +320,12 @@ def copy(source, destination, ignores=None):
for src_dir, subdirs, others in os.walk(source_): for src_dir, subdirs, others in os.walk(source_):
dst_dir = os.path.join(destination_, dst_dir = os.path.join(destination_,
os.path.relpath(src_dir, source_)) os.path.relpath(src_dir, source_))
subdirs[:] = (s for s in subdirs if not any(fnmatch.fnmatch(s, i) subdirs[:] = (s for s in subdirs if not any(fnmatch.fnmatch(s, i)
for i in ignores)) for i in ignores))
others[:] = (o for o in others if not any(fnmatch.fnmatch(o, i) others[:] = (o for o in others if not any(fnmatch.fnmatch(o, i)
for i in ignores)) for i in ignores))
if not os.path.isdir(dst_dir): if not os.path.isdir(dst_dir):
logger.info('Creating directory %s', dst_dir) logger.info('Creating directory %s', dst_dir)
@ -338,9 +339,11 @@ def copy(source, destination, ignores=None):
logger.info('Copying %s to %s', src_path, dst_path) logger.info('Copying %s to %s', src_path, dst_path)
shutil.copy2(src_path, dst_path) shutil.copy2(src_path, dst_path)
else: else:
logger.warning('Skipped copy %s (not a file or directory) to %s', logger.warning('Skipped copy %s (not a file or '
'directory) to %s',
src_path, dst_path) src_path, dst_path)
def clean_output_dir(path, retention): def clean_output_dir(path, retention):
"""Remove all files from output directory except those in retention list""" """Remove all files from output directory except those in retention list"""
@ -366,8 +369,8 @@ def clean_output_dir(path, retention):
shutil.rmtree(file) shutil.rmtree(file)
logger.debug("Deleted directory %s", file) logger.debug("Deleted directory %s", file)
except Exception as e: except Exception as e:
logger.error("Unable to delete directory %s; %s", logger.error("Unable to delete directory %s; %s",
file, e) file, e)
elif os.path.isfile(file) or os.path.islink(file): elif os.path.isfile(file) or os.path.islink(file):
try: try:
os.remove(file) os.remove(file)
@ -507,12 +510,12 @@ def process_translations(content_list, order_by=None):
for slug, items in grouped_by_slugs: for slug, items in grouped_by_slugs:
items = list(items) items = list(items)
# items with `translation` metadata will be used as translations # items with `translation` metadata will be used as translations...
default_lang_items = list(filter( default_lang_items = list(filter(
lambda i: i.metadata.get('translation', 'false').lower() lambda i:
== 'false', i.metadata.get('translation', 'false').lower() == 'false',
items)) items))
# unless all items with that slug are translations # ...unless all items with that slug are translations
if not default_lang_items: if not default_lang_items:
default_lang_items = items default_lang_items = items
@ -522,13 +525,14 @@ def process_translations(content_list, order_by=None):
len_ = len(lang_items) len_ = len(lang_items)
if len_ > 1: if len_ > 1:
logger.warning('There are %s variants of "%s" with lang %s', logger.warning('There are %s variants of "%s" with lang %s',
len_, slug, lang) len_, slug, lang)
for x in lang_items: for x in lang_items:
logger.warning('\t%s', x.source_path) logger.warning('\t%s', x.source_path)
# find items with default language # find items with default language
default_lang_items = list(filter(attrgetter('in_default_lang'), default_lang_items = list(filter(
default_lang_items)) attrgetter('in_default_lang'),
default_lang_items))
        # if there is no article with default language, take another one         # if there is no article with default language, take another one
if not default_lang_items: if not default_lang_items:
@ -536,10 +540,9 @@ def process_translations(content_list, order_by=None):
if not slug: if not slug:
logger.warning( logger.warning(
'empty slug for %s. ' 'Empty slug for %s. You can fix this by '
'You can fix this by adding a title or a slug to your ' 'adding a title or a slug to your content',
'content', default_lang_items[0].source_path)
default_lang_items[0].source_path)
index.extend(default_lang_items) index.extend(default_lang_items)
translations.extend([x for x in items if x not in default_lang_items]) translations.extend([x for x in items if x not in default_lang_items])
for a in items: for a in items:
@ -567,10 +570,12 @@ def process_translations(content_list, order_by=None):
index.sort(key=attrgetter(order_by), index.sort(key=attrgetter(order_by),
reverse=order_reversed) reverse=order_reversed)
except AttributeError: except AttributeError:
logger.warning('There is no "%s" attribute in the item ' logger.warning(
'There is no "%s" attribute in the item '
'metadata. Defaulting to slug order.', order_by) 'metadata. Defaulting to slug order.', order_by)
else: else:
logger.warning('Invalid *_ORDER_BY setting (%s).' logger.warning(
'Invalid *_ORDER_BY setting (%s).'
'Valid options are strings and functions.', order_by) 'Valid options are strings and functions.', order_by)
return index, translations return index, translations
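
process_translations iterates grouped_by_slugs, which comes from itertools.groupby over content sorted by slug just above this hunk; groupby only merges adjacent keys, hence the sort. With hypothetical stand-ins for Content:

    from itertools import groupby
    from operator import attrgetter

    class Item(object):                     # hypothetical stand-in
        def __init__(self, slug, lang):
            self.slug, self.lang = slug, lang

    items = [Item('hello', 'en'), Item('bye', 'en'), Item('hello', 'fr')]
    items.sort(key=attrgetter('slug'))      # groupby needs sorted input
    for slug, group in groupby(items, attrgetter('slug')):
        print(slug, [i.lang for i in group])
    # bye ['en']
    # hello ['en', 'fr']
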
@ -589,12 +594,12 @@ def folder_watcher(path, extensions, ignores=[]):
dirs[:] = [x for x in dirs if not x.startswith(os.curdir)] dirs[:] = [x for x in dirs if not x.startswith(os.curdir)]
for f in files: for f in files:
if (f.endswith(tuple(extensions)) and if f.endswith(tuple(extensions)) and \
not any(fnmatch.fnmatch(f, ignore) for ignore in ignores)): not any(fnmatch.fnmatch(f, ignore) for ignore in ignores):
try: try:
yield os.stat(os.path.join(root, f)).st_mtime yield os.stat(os.path.join(root, f)).st_mtime
except OSError as e: except OSError as e:
logger.warning('Caught Exception: %s', e) logger.warning('Caught Exception: %s', e)
LAST_MTIME = 0 LAST_MTIME = 0
while True: while True:
View file
@ -1,22 +1,24 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import with_statement, unicode_literals, print_function from __future__ import print_function, unicode_literals, with_statement
import six
import os
import logging import logging
import os
from feedgenerator import Atom1Feed, Rss201rev2Feed
from jinja2 import Markup
import six
from six.moves.urllib.parse import urlparse
from pelican import signals
from pelican.paginator import Paginator
from pelican.utils import (get_relative_path, is_selected_for_writing,
path_to_url, set_date_tzinfo)
if not six.PY3: if not six.PY3:
from codecs import open from codecs import open
from feedgenerator import Atom1Feed, Rss201rev2Feed
from jinja2 import Markup
from six.moves.urllib.parse import urlparse
from pelican.paginator import Paginator
from pelican.utils import (get_relative_path, path_to_url, set_date_tzinfo,
is_selected_for_writing)
from pelican import signals
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -119,10 +121,10 @@ class Writer(object):
feed.write(fp, 'utf-8') feed.write(fp, 'utf-8')
logger.info('Writing %s', complete_path) logger.info('Writing %s', complete_path)
signals.feed_written.send(complete_path, context=context, feed=feed) signals.feed_written.send(
complete_path, context=context, feed=feed)
return feed return feed
def write_file(self, name, template, context, relative_urls=False, def write_file(self, name, template, context, relative_urls=False,
paginated=None, override_output=False, **kwargs): paginated=None, override_output=False, **kwargs):
"""Render the template and write the file. """Render the template and write the file.
@ -139,9 +141,10 @@ class Writer(object):
:param **kwargs: additional variables to pass to the templates :param **kwargs: additional variables to pass to the templates
""" """
if name is False or name == "" or\ if name is False or \
not is_selected_for_writing(self.settings,\ name == "" or \
os.path.join(self.output_path, name)): not is_selected_for_writing(self.settings,
os.path.join(self.output_path, name)):
return return
elif not name: elif not name:
# other stuff, just return for now # other stuff, just return for now
@ -169,7 +172,8 @@ class Writer(object):
def _get_localcontext(context, name, kwargs, relative_urls): def _get_localcontext(context, name, kwargs, relative_urls):
localcontext = context.copy() localcontext = context.copy()
localcontext['localsiteurl'] = localcontext.get('localsiteurl', None) localcontext['localsiteurl'] = localcontext.get(
'localsiteurl', None)
if relative_urls: if relative_urls:
relative_url = path_to_url(get_relative_path(name)) relative_url = path_to_url(get_relative_path(name))
localcontext['SITEURL'] = relative_url localcontext['SITEURL'] = relative_url
@ -201,11 +205,13 @@ class Writer(object):
'%s_previous_page' % key: previous_page, '%s_previous_page' % key: previous_page,
'%s_next_page' % key: next_page}) '%s_next_page' % key: next_page})
localcontext = _get_localcontext(context, page.save_as, paginated_kwargs, relative_urls) localcontext = _get_localcontext(
context, page.save_as, paginated_kwargs, relative_urls)
_write_file(template, localcontext, self.output_path, _write_file(template, localcontext, self.output_path,
page.save_as, override_output) page.save_as, override_output)
else: else:
# no pagination # no pagination
localcontext = _get_localcontext(context, name, kwargs, relative_urls) localcontext = _get_localcontext(
context, name, kwargs, relative_urls)
_write_file(template, localcontext, self.output_path, name, _write_file(template, localcontext, self.output_path, name,
override_output) override_output)
View file
@ -38,4 +38,5 @@ deps =
flake8 <= 2.4.1 flake8 <= 2.4.1
git+https://github.com/public/flake8-import-order@2ac7052a4e02b4a8a0125a106d87465a3b9fd688 git+https://github.com/public/flake8-import-order@2ac7052a4e02b4a8a0125a106d87465a3b9fd688
commands = commands =
flake8 --version
flake8 pelican flake8 pelican