Mirror of https://github.com/getpelican/pelican.git
Synced 2025-10-15 20:28:56 +02:00

Merge pull request #1753 from ingwinlu/flake8
Make Pelican codebase compliant with PEP8
Commit ca389e70e1, 32 changed files with 1273 additions and 869 deletions
.travis.yml
@@ -3,6 +3,7 @@ python:
   - "2.7"
 env:
   - TOX_ENV=docs
+  - TOX_ENV=flake8
   - TOX_ENV=py27
   - TOX_ENV=py33
   - TOX_ENV=py34
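The new TOX_ENV=flake8 entry adds a lint-only environment to the Travis matrix. The tox.ini side is not part of this diff; a minimal flake8 environment would look roughly like this (section name and target path are assumptions, not taken from the PR):

    [testenv:flake8]
    deps = flake8
    commands = flake8 pelican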
pelican/__init__.py
@@ -1,45 +1,41 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
-import six
+from __future__ import print_function, unicode_literals
 
+import argparse
+import collections
+import locale
+import logging
 import os
 import re
 import sys
 import time
-import logging
-import argparse
-import locale
-import collections
+
+import six
 
 # pelican.log has to be the first pelican module to be loaded
 # because logging.setLoggerClass has to be called before logging.getLogger
-from pelican.log import init
+from pelican.log import init  # noqa
 
 from pelican import signals
 from pelican.generators import (ArticlesGenerator, PagesGenerator,
-                                StaticGenerator, SourceFileGenerator,
+                                SourceFileGenerator, StaticGenerator,
                                 TemplatePagesGenerator)
 from pelican.readers import Readers
 from pelican.settings import read_settings
-from pelican.utils import (clean_output_dir, folder_watcher,
-                           file_watcher, maybe_pluralize)
+from pelican.utils import (clean_output_dir, file_watcher,
+                           folder_watcher, maybe_pluralize)
 from pelican.writers import Writer
 
 __version__ = "3.6.4.dev0"
 
 DEFAULT_CONFIG_NAME = 'pelicanconf.py'
 
 
 logger = logging.getLogger(__name__)
 
 
 class Pelican(object):
 
     def __init__(self, settings):
-        """
-        Pelican initialisation, performs some checks on the environment before
-        doing anything else.
+        """Pelican initialisation
+
+        Performs some checks on the environment before doing anything else.
         """
 
         # define the default settings
@@ -152,7 +148,7 @@ class Pelican(object):
         context = self.settings.copy()
         # Share these among all the generators and content objects:
         context['filenames'] = {}  # maps source path to Content object or None
-        context['localsiteurl'] = self.settings['SITEURL']
+        context['localsiteurl'] = self.settings['SITEURL']
 
         generators = [
             cls(
@@ -190,23 +186,23 @@ class Pelican(object):
                                   if isinstance(g, PagesGenerator))
 
         pluralized_articles = maybe_pluralize(
-            len(articles_generator.articles) +
-            len(articles_generator.translations),
+            (len(articles_generator.articles) +
+             len(articles_generator.translations)),
             'article',
             'articles')
         pluralized_drafts = maybe_pluralize(
-            len(articles_generator.drafts) +
-            len(articles_generator.drafts_translations),
+            (len(articles_generator.drafts) +
+             len(articles_generator.drafts_translations)),
             'draft',
             'drafts')
         pluralized_pages = maybe_pluralize(
-            len(pages_generator.pages) +
-            len(pages_generator.translations),
+            (len(pages_generator.pages) +
+             len(pages_generator.translations)),
             'page',
             'pages')
         pluralized_hidden_pages = maybe_pluralize(
-            len(pages_generator.hidden_pages) +
-            len(pages_generator.hidden_translations),
+            (len(pages_generator.hidden_pages) +
+             len(pages_generator.hidden_translations)),
             'hidden page',
             'hidden pages')
@@ -243,8 +239,8 @@ class Pelican(object):
         return generators
 
     def get_writer(self):
-        writers = [ w for (_, w) in signals.get_writer.send(self)
-                    if isinstance(w, type) ]
+        writers = [w for (_, w) in signals.get_writer.send(self)
+                   if isinstance(w, type)]
         writers_found = len(writers)
         if writers_found == 0:
             return Writer(self.output_path, settings=self.settings)
@@ -254,15 +250,15 @@ class Pelican(object):
             logger.debug('Found writer: %s', writer)
         else:
             logger.warning(
-                '%s writers found, using only first one: %s',
+                '%s writers found, using only first one: %s',
                 writers_found, writer)
         return writer(self.output_path, settings=self.settings)
 
 
 def parse_arguments():
     parser = argparse.ArgumentParser(
-        description="""A tool to generate a static blog,
-        with restructured text input files.""",
+        description='A tool to generate a static blog, '
+                    ' with restructured text input files.',
         formatter_class=argparse.ArgumentDefaultsHelpFormatter
     )
@@ -354,7 +350,7 @@ def get_config(args):
     # argparse returns bytes in Py2. There is no definite answer as to which
     # encoding argparse (or sys.argv) uses.
     # "Best" option seems to be locale.getpreferredencoding()
-    # ref: http://mail.python.org/pipermail/python-list/2006-October/405766.html
+    # http://mail.python.org/pipermail/python-list/2006-October/405766.html
     if not six.PY3:
         enc = locale.getpreferredencoding()
         for key in config:
@@ -424,7 +420,8 @@ def main():
 
-            # Added static paths
-            for static_path in set(new_static).difference(old_static):
+            # Add new watchers and set them as modified
+            new_watchers = set(new_static).difference(old_static)
+            for static_path in new_watchers:
                 static_key = '[static]%s' % static_path
                 watchers[static_key] = folder_watcher(
                     os.path.join(pelican.path, static_path),
@@ -434,7 +431,8 @@ def main():
 
-            # Removed static paths
-            for static_path in set(old_static).difference(new_static):
+            # Remove watchers and modified values
+            old_watchers = set(old_static).difference(new_static)
+            for static_path in old_watchers:
                 static_key = '[static]%s' % static_path
                 watchers.pop(static_key)
                 modified.pop(static_key)
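The import churn above shows the pattern repeated across the whole PR: imports grouped per PEP 8 (standard library first, then third-party, then local), alphabetized within each group, with `# noqa` exempting the one import whose position matters because pelican.log must load before any other pelican module. A minimal sketch of the convention (the module selection is illustrative):

    # standard library, alphabetized
    import logging
    import os

    # third-party
    import six

    # local; noqa: must be imported before other pelican modules
    from pelican.log import init  # noqa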
pelican/cache.py
@@ -1,16 +1,14 @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
 import hashlib
 import logging
 import os
-try:
-    import cPickle as pickle
-except:
-    import pickle
+
+from six.moves import cPickle as pickle
 
 from pelican.utils import mkdir_p
 
 
 logger = logging.getLogger(__name__)
@@ -83,6 +81,7 @@ class FileStampDataCacher(FileDataCacher):
         """This sublcass additionally sets filestamp function
         and base path for filestamping operations
         """
+
         super(FileStampDataCacher, self).__init__(settings, cache_name,
                                                   caching_policy,
                                                   load_policy)
@@ -118,6 +117,7 @@ class FileStampDataCacher(FileDataCacher):
         a hash for a function name in the hashlib module
         or an empty bytes string otherwise
         """
+
         try:
             return self._filestamp_func(filename)
         except (IOError, OSError, TypeError) as err:
@@ -133,6 +133,7 @@ class FileStampDataCacher(FileDataCacher):
         Modification is checked by comparing the cached
         and current file stamp.
         """
+
         stamp, data = super(FileStampDataCacher, self).get_cached_data(
             filename, (None, default))
         if stamp != self._get_file_stamp(filename):
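The try/except dance around cPickle is replaced by six.moves, which resolves to the fast cPickle on Python 2 and to the stdlib pickle on Python 3. A self-contained sketch of the resulting behavior:

    from six.moves import cPickle as pickle

    # Same API on both major versions of Python:
    blob = pickle.dumps({'path/to/article.rst': None})
    assert pickle.loads(blob) == {'path/to/article.rst': None}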
pelican/contents.py
@@ -1,23 +1,24 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
-import six
-from six.moves.urllib.parse import urlparse, urlunparse
+from __future__ import print_function, unicode_literals
 
 import copy
 import locale
 import logging
 import functools
 import os
 import re
 import sys
 
 import pytz
 
+import six
+from six.moves.urllib.parse import urlparse, urlunparse
+
 from pelican import signals
 from pelican.settings import DEFAULT_CONFIG
-from pelican.utils import (slugify, truncate_html_words, memoized, strftime,
-                           python_2_unicode_compatible, deprecated_attribute,
-                           path_to_url, posixize_path, set_date_tzinfo, SafeDatetime)
+from pelican.utils import (SafeDatetime, deprecated_attribute, memoized,
+                           path_to_url, posixize_path,
+                           python_2_unicode_compatible, set_date_tzinfo,
+                           slugify, strftime, truncate_html_words)
 
 # Import these so that they're avalaible when you import from pelican.contents.
 from pelican.urlwrappers import (URLWrapper, Author, Category, Tag)  # NOQA
@@ -66,7 +67,7 @@ class Content(object):
         # also keep track of the metadata attributes available
         self.metadata = local_metadata
 
-        #default template if it's not defined in page
+        # default template if it's not defined in page
         self.template = self._get_template()
 
         # First, read the authors from "authors", if not, fallback to "author"
@@ -94,13 +95,16 @@ class Content(object):
         # create the slug if not existing, generate slug according to
         # setting of SLUG_ATTRIBUTE
         if not hasattr(self, 'slug'):
-            if settings['SLUGIFY_SOURCE'] == 'title' and hasattr(self, 'title'):
+            if (settings['SLUGIFY_SOURCE'] == 'title' and
+                    hasattr(self, 'title')):
                 self.slug = slugify(self.title,
-                        settings.get('SLUG_SUBSTITUTIONS', ()))
-            elif settings['SLUGIFY_SOURCE'] == 'basename' and source_path != None:
-                basename = os.path.basename(os.path.splitext(source_path)[0])
-                self.slug = slugify(basename,
-                        settings.get('SLUG_SUBSTITUTIONS', ()))
+                                    settings.get('SLUG_SUBSTITUTIONS', ()))
+            elif (settings['SLUGIFY_SOURCE'] == 'basename' and
+                    source_path is not None):
+                basename = os.path.basename(
+                    os.path.splitext(source_path)[0])
+                self.slug = slugify(
+                    basename, settings.get('SLUG_SUBSTITUTIONS', ()))
 
         self.source_path = source_path
@@ -233,7 +237,8 @@ class Content(object):
             if isinstance(linked_content, Static):
                 linked_content.attach_to(self)
             else:
-                logger.warning("%s used {attach} link syntax on a "
+                logger.warning(
+                    "%s used {attach} link syntax on a "
                     "non-static file. Use {filename} instead.",
                     self.get_relative_source_path())
             origin = '/'.join((siteurl, linked_content.url))
@@ -241,7 +246,7 @@ class Content(object):
             else:
                 logger.warning(
                     "Unable to find `%s`, skipping url replacement.",
-                    value.geturl(), extra = {
+                    value.geturl(), extra={
                         'limit_msg': ("Other resources were not found "
                                       "and their urls not replaced")})
         elif what == 'category':
@@ -250,9 +255,9 @@ class Content(object):
             origin = '/'.join((siteurl, Tag(path, self.settings).url))
         else:
             logger.warning(
-                    "Replacement Indicator '%s' not recognized, "
-                    "skipping replacement",
-                    what)
+                "Replacement Indicator '%s' not recognized, "
+                "skipping replacement",
+                what)
 
         # keep all other parts, such as query, fragment, etc.
         parts = list(value)
@@ -337,7 +342,9 @@ class Content(object):
 
         return posixize_path(
             os.path.relpath(
-                os.path.abspath(os.path.join(self.settings['PATH'], source_path)),
+                os.path.abspath(os.path.join(
+                    self.settings['PATH'],
+                    source_path)),
                 os.path.abspath(self.settings['PATH'])
             ))
@@ -402,9 +409,12 @@ class Static(Page):
     def attach_to(self, content):
         """Override our output directory with that of the given content object.
         """
-        # Determine our file's new output path relative to the linking document.
-        # If it currently lives beneath the linking document's source directory,
-        # preserve that relationship on output. Otherwise, make it a sibling.
+
+        # Determine our file's new output path relative to the linking
+        # document. If it currently lives beneath the linking
+        # document's source directory, preserve that relationship on output.
+        # Otherwise, make it a sibling.
+
         linking_source_dir = os.path.dirname(content.source_path)
         tail_path = os.path.relpath(self.source_path, linking_source_dir)
         if tail_path.startswith(os.pardir + os.sep):
@@ -420,11 +430,14 @@ class Static(Page):
         # 'some/content' with a file named 'index.html'.) Rather than trying
         # to figure it out by comparing the linking document's url and save_as
         # path, we simply build our new url from our new save_as path.
+
         new_url = path_to_url(new_save_as)
 
         def _log_reason(reason):
-            logger.warning("The {attach} link in %s cannot relocate %s "
-                "because %s. Falling back to {filename} link behavior instead.",
+            logger.warning(
+                "The {attach} link in %s cannot relocate "
+                "%s because %s. Falling back to "
+                "{filename} link behavior instead.",
                 content.get_relative_source_path(),
                 self.get_relative_source_path(), reason,
                 extra={'limit_msg': "More {attach} warnings silenced."})
@@ -452,5 +465,6 @@ def is_valid_content(content, f):
         content.check_properties()
         return True
     except NameError as e:
-        logger.error("Skipping %s: could not find information about '%s'", f, e)
+        logger.error(
+            "Skipping %s: could not find information about '%s'", f, e)
         return False
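One substantive style fix in this file: `source_path != None` becomes `source_path is not None` (flake8's E711). Identity is the safer test because `__eq__` can be overridden to compare equal to anything; a small demonstration with a deliberately pathological class (hypothetical, not from Pelican):

    class AlwaysEqual(object):
        def __eq__(self, other):
            return True  # claims equality even with None

    obj = AlwaysEqual()
    assert obj == None        # misleadingly true, and flagged as E711
    assert obj is not None    # identity cannot be fooled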
pelican/generators.py
@@ -1,28 +1,28 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals
 
-import os
-import six
-import logging
-import shutil
-import fnmatch
 import calendar
+import fnmatch
+import logging
+import os
+import shutil
-
 from codecs import open
 from collections import defaultdict
 from functools import partial
 from itertools import chain, groupby
 from operator import attrgetter
 
-from jinja2 import (Environment, FileSystemLoader, PrefixLoader, ChoiceLoader,
-                    BaseLoader, TemplateNotFound)
+from jinja2 import (BaseLoader, ChoiceLoader, Environment, FileSystemLoader,
+                    PrefixLoader, TemplateNotFound)
 
+import six
+
+from pelican import signals
 from pelican.cache import FileStampDataCacher
 from pelican.contents import Article, Draft, Page, Static, is_valid_content
 from pelican.readers import Readers
-from pelican.utils import (copy, process_translations, mkdir_p, DateFormatter,
-                           python_2_unicode_compatible, posixize_path)
-from pelican import signals
+from pelican.utils import (DateFormatter, copy, mkdir_p, posixize_path,
+                           process_translations, python_2_unicode_compatible)
 
 
 logger = logging.getLogger(__name__)
@@ -31,6 +31,7 @@ logger = logging.getLogger(__name__)
 class PelicanTemplateNotFound(Exception):
     pass
 
+
 @python_2_unicode_compatible
 class Generator(object):
     """Baseclass generator"""
@@ -90,8 +91,9 @@ class Generator(object):
         try:
             self._templates[name] = self.env.get_template(name + '.html')
         except TemplateNotFound:
-            raise PelicanTemplateNotFound('[templates] unable to load %s.html from %s'
-                                          % (name, self._templates_path))
+            raise PelicanTemplateNotFound(
+                '[templates] unable to load {}.html from {}'.format(
+                    name, self._templates_path))
         return self._templates[name]
 
     def _include_path(self, path, extensions=None):
@@ -105,7 +107,7 @@ class Generator(object):
             extensions = tuple(self.readers.extensions)
         basename = os.path.basename(path)
 
-        #check IGNORE_FILES
+        # check IGNORE_FILES
         ignores = self.settings['IGNORE_FILES']
         if any(fnmatch.fnmatch(basename, ignore) for ignore in ignores):
             return False
@@ -122,8 +124,9 @@ class Generator(object):
         :param extensions: the list of allowed extensions (if False, all
             extensions are allowed)
         """
+        # backward compatibility for older generators
         if isinstance(paths, six.string_types):
-            paths = [paths]  # backward compatibility for older generators
+            paths = [paths]
 
         # group the exclude dir names by parent path, for use with os.walk()
         exclusions_by_dirpath = {}
@@ -138,7 +141,8 @@ class Generator(object):
             root = os.path.join(self.path, path) if path else self.path
 
             if os.path.isdir(root):
-                for dirpath, dirs, temp_files in os.walk(root, followlinks=True):
+                for dirpath, dirs, temp_files in os.walk(
+                        root, followlinks=True):
                     drop = []
                     excl = exclusions_by_dirpath.get(dirpath, ())
                     for d in dirs:
@@ -178,7 +182,8 @@ class Generator(object):
         before this method is called, even if they failed to process.)
         The path argument is expected to be relative to self.path.
         """
-        return posixize_path(os.path.normpath(path)) in self.context['filenames']
+        return (posixize_path(os.path.normpath(path))
+                in self.context['filenames'])
 
     def _update_context(self, items):
         """Update the context with the given items from the currrent
@@ -211,7 +216,8 @@ class CachingGenerator(Generator, FileStampDataCacher):
                            readers_cache_name=(cls_name + '-Readers'),
                            **kwargs)
 
-        cache_this_level = self.settings['CONTENT_CACHING_LAYER'] == 'generator'
+        cache_this_level = \
+            self.settings['CONTENT_CACHING_LAYER'] == 'generator'
         caching_policy = cache_this_level and self.settings['CACHE_CONTENT']
         load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE']
         FileStampDataCacher.__init__(self, self.settings, cls_name,
@@ -259,14 +265,14 @@ class ArticlesGenerator(CachingGenerator):
 
     def __init__(self, *args, **kwargs):
         """initialize properties"""
-        self.articles = [] # only articles in default language
+        self.articles = []  # only articles in default language
         self.translations = []
         self.dates = {}
         self.tags = defaultdict(list)
         self.categories = defaultdict(list)
         self.related_posts = []
         self.authors = defaultdict(list)
-        self.drafts = [] # only drafts in default language
+        self.drafts = []  # only drafts in default language
         self.drafts_translations = []
         super(ArticlesGenerator, self).__init__(*args, **kwargs)
         signals.article_generator_init.send(self)
@@ -282,8 +288,8 @@ class ArticlesGenerator(CachingGenerator):
             writer.write_feed(self.articles, self.context,
                               self.settings['FEED_RSS'], feed_type='rss')
 
-        if (self.settings.get('FEED_ALL_ATOM')
-                or self.settings.get('FEED_ALL_RSS')):
+        if (self.settings.get('FEED_ALL_ATOM') or
+                self.settings.get('FEED_ALL_RSS')):
             all_articles = list(self.articles)
             for article in self.articles:
                 all_articles.extend(article.translations)
@@ -322,8 +328,8 @@ class ArticlesGenerator(CachingGenerator):
                                       self.settings['AUTHOR_FEED_RSS']
                                       % auth.slug, feed_type='rss')
 
-        if (self.settings.get('TAG_FEED_ATOM')
-                or self.settings.get('TAG_FEED_RSS')):
+        if (self.settings.get('TAG_FEED_ATOM') or
+                self.settings.get('TAG_FEED_RSS')):
             for tag, arts in self.tags.items():
                 arts.sort(key=attrgetter('date'), reverse=True)
                 if self.settings.get('TAG_FEED_ATOM'):
@@ -336,8 +342,8 @@ class ArticlesGenerator(CachingGenerator):
                                       self.settings['TAG_FEED_RSS'] % tag.slug,
                                       feed_type='rss')
 
-        if (self.settings.get('TRANSLATION_FEED_ATOM')
-                or self.settings.get('TRANSLATION_FEED_RSS')):
+        if (self.settings.get('TRANSLATION_FEED_ATOM') or
+                self.settings.get('TRANSLATION_FEED_RSS')):
             translations_feeds = defaultdict(list)
             for article in chain(self.articles, self.translations):
                 translations_feeds[article.lang].append(article)
@@ -472,9 +478,9 @@ class ArticlesGenerator(CachingGenerator):
         """Generate drafts pages."""
         for draft in chain(self.drafts_translations, self.drafts):
             write(draft.save_as, self.get_template(draft.template),
-                self.context, article=draft, category=draft.category,
-                override_output=hasattr(draft, 'override_save_as'),
-                blog=True, all_articles=self.articles)
+                  self.context, article=draft, category=draft.category,
+                  override_output=hasattr(draft, 'override_save_as'),
+                  blog=True, all_articles=self.articles)
 
     def generate_pages(self, writer):
         """Generate the pages on the disk"""
@@ -503,7 +509,8 @@ class ArticlesGenerator(CachingGenerator):
                 exclude=self.settings['ARTICLE_EXCLUDES']):
             article_or_draft = self.get_cached_data(f, None)
             if article_or_draft is None:
-                #TODO needs overhaul, maybe nomad for read_file solution, unified behaviour
+                # TODO needs overhaul, maybe nomad for read_file
+                # solution, unified behaviour
                 try:
                     article_or_draft = self.readers.read_file(
                         base_path=self.path, path=f, content_class=Article,
@@ -513,7 +520,8 @@ class ArticlesGenerator(CachingGenerator):
                         context_signal=signals.article_generator_context,
                         context_sender=self)
                 except Exception as e:
-                    logger.error('Could not process %s\n%s', f, e,
+                    logger.error(
+                        'Could not process %s\n%s', f, e,
                         exc_info=self.settings.get('DEBUG', False))
                     self._add_failed_source_path(f)
                     continue
@@ -535,8 +543,9 @@ class ArticlesGenerator(CachingGenerator):
                     self.add_source_path(article_or_draft)
                     all_drafts.append(article_or_draft)
                 else:
-                    logger.error("Unknown status '%s' for file %s, skipping it.",
-                                 article_or_draft.status, f)
+                    logger.error(
+                        "Unknown status '%s' for file %s, skipping it.",
+                        article_or_draft.status, f)
                     self._add_failed_source_path(f)
                     continue
@@ -544,9 +553,9 @@ class ArticlesGenerator(CachingGenerator):
 
             self.add_source_path(article_or_draft)
 
-        self.articles, self.translations = process_translations(all_articles,
-                order_by=self.settings['ARTICLE_ORDER_BY'])
+        self.articles, self.translations = process_translations(
+            all_articles,
+            order_by=self.settings['ARTICLE_ORDER_BY'])
         self.drafts, self.drafts_translations = \
             process_translations(all_drafts)
@@ -615,7 +624,8 @@ class PagesGenerator(CachingGenerator):
                     context_signal=signals.page_generator_context,
                     context_sender=self)
             except Exception as e:
-                logger.error('Could not process %s\n%s', f, e,
+                logger.error(
+                    'Could not process %s\n%s', f, e,
                     exc_info=self.settings.get('DEBUG', False))
                 self._add_failed_source_path(f)
                 continue
@@ -629,8 +639,9 @@ class PagesGenerator(CachingGenerator):
             elif page.status.lower() == "hidden":
                 hidden_pages.append(page)
             else:
-                logger.error("Unknown status '%s' for file %s, skipping it.",
-                             page.status, f)
+                logger.error(
+                    "Unknown status '%s' for file %s, skipping it.",
+                    page.status, f)
                 self._add_failed_source_path(f)
                 continue
@@ -638,10 +649,11 @@ class PagesGenerator(CachingGenerator):
 
             self.add_source_path(page)
 
-        self.pages, self.translations = process_translations(all_pages,
-                order_by=self.settings['PAGE_ORDER_BY'])
-        self.hidden_pages, self.hidden_translations = (
-            process_translations(hidden_pages))
+        self.pages, self.translations = process_translations(
+            all_pages,
+            order_by=self.settings['PAGE_ORDER_BY'])
+        self.hidden_pages, self.hidden_translations = \
+            process_translations(hidden_pages)
 
         self._update_context(('pages', 'hidden_pages'))
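The feed hunks in this file all make one mechanical change: in a wrapped boolean condition, `or` moves from the start of the continuation line to the end of the preceding line, the placement the pycodestyle configuration used here accepts. Reduced to a runnable sketch with placeholder settings:

    settings = {'FEED_ALL_ATOM': None, 'FEED_ALL_RSS': 'feeds/all.rss.xml'}

    # operator ends the first line; continuation is indented one extra level
    if (settings.get('FEED_ALL_ATOM') or
            settings.get('FEED_ALL_RSS')):
        print('at least one combined feed is configured')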
pelican/log.py
@@ -1,18 +1,18 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals
 
+import locale
+import logging
+import os
+import sys
+from collections import Mapping, defaultdict
+
+import six
+
 __all__ = [
     'init'
 ]
 
-import os
-import sys
-import logging
-import locale
-
-from collections import defaultdict, Mapping
-
-import six
 
 class BaseFormatter(logging.Formatter):
     def __init__(self, fmt=None, datefmt=None):
@@ -20,7 +20,8 @@ class BaseFormatter(logging.Formatter):
         super(BaseFormatter, self).__init__(fmt=FORMAT, datefmt=datefmt)
 
     def format(self, record):
-        record.__dict__['customlevelname'] = self._get_levelname(record.levelname)
+        customlevel = self._get_levelname(record.levelname)
+        record.__dict__['customlevelname'] = customlevel
         # format multiline messages 'nicely' to make it clear they are together
         record.msg = record.msg.replace('\n', '\n | ')
         return super(BaseFormatter, self).format(record)
@@ -132,13 +133,13 @@ class SafeLogger(logging.Logger):
     def _log(self, level, msg, args, exc_info=None, extra=None):
         # if the only argument is a Mapping, Logger uses that for formatting
        # format values for that case
-        if args and len(args)==1 and isinstance(args[0], Mapping):
+        if args and len(args) == 1 and isinstance(args[0], Mapping):
             args = ({k: self._decode_arg(v) for k, v in args[0].items()},)
         # otherwise, format each arg
         else:
             args = tuple(self._decode_arg(arg) for arg in args)
-        super(SafeLogger, self)._log(level, msg, args,
-                                     exc_info=exc_info, extra=extra)
+        super(SafeLogger, self)._log(
+            level, msg, args, exc_info=exc_info, extra=extra)
 
     def _decode_arg(self, arg):
         '''
@@ -175,8 +176,7 @@ def init(level=None, handler=logging.StreamHandler()):
 
     logger = logging.getLogger()
 
-    if (os.isatty(sys.stdout.fileno())
-            and not sys.platform.startswith('win')):
+    if os.isatty(sys.stdout.fileno()) and not sys.platform.startswith('win'):
         fmt = ANSIFormatter()
     else:
         fmt = TextFormatter()
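The `len(args)==1` fix above is E225 (missing whitespace around operator) and is purely cosmetic; whitespace never changes the compiled code, which can be checked directly:

    a = compile('len(args)==1', '<test>', 'eval')
    b = compile('len(args) == 1', '<test>', 'eval')
    print(a.co_code == b.co_code)  # True: byte-identical bytecode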
pelican/paginator.py
@@ -1,18 +1,15 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
-import six
+from __future__ import print_function, unicode_literals
 
 # From django.core.paginator
-from collections import namedtuple
 import functools
 import logging
 import os
+from collections import namedtuple
 from math import ceil
 
+import six
+
 logger = logging.getLogger(__name__)
 
 PaginationRule = namedtuple(
     'PaginationRule',
     'min_page URL SAVE_AS',
@@ -143,7 +140,7 @@ class Page(object):
             'settings': self.settings,
             'base_name': os.path.dirname(self.name),
             'number_sep': '/',
-            'extension': self.extension,
+            'extension': self.extension,
         }
 
         if self.number == 1:
pelican/readers.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals
 
 import logging
 import os
@@ -9,24 +9,50 @@ import docutils
 import docutils.core
 import docutils.io
 from docutils.writers.html4css1 import HTMLTranslator
-import six
 
-# import the directives to have pygments support
+import six
+from six.moves.html_parser import HTMLParser
+
+from pelican import rstdirectives  # NOQA
+from pelican import signals
+from pelican.cache import FileStampDataCacher
+from pelican.contents import Author, Category, Page, Tag
+from pelican.utils import SafeDatetime, get_date, pelican_open, posixize_path
+
 try:
     from markdown import Markdown
 except ImportError:
     Markdown = False  # NOQA
 
 try:
     from html import escape
 except ImportError:
     from cgi import escape
-from six.moves.html_parser import HTMLParser
-
-from pelican import signals
-from pelican.cache import FileStampDataCacher
-from pelican.contents import Page, Category, Tag, Author
-from pelican.utils import get_date, pelican_open, SafeDatetime, posixize_path
+
+# Metadata processors have no way to discard an unwanted value, so we have
+# them return this value instead to signal that it should be discarded later.
+# This means that _filter_discardable_metadata() must be called on processed
+# metadata dicts before use, to remove the items with the special value.
+_DISCARD = object()
+
+METADATA_PROCESSORS = {
+    'tags': lambda x, y: ([
+        Tag(tag, y)
+        for tag in ensure_metadata_list(x)
+    ] or _DISCARD),
+    'date': lambda x, y: get_date(x.replace('_', ' ')),
+    'modified': lambda x, y: get_date(x),
+    'status': lambda x, y: x.strip() or _DISCARD,
+    'category': lambda x, y: _process_if_nonempty(Category, x, y),
+    'author': lambda x, y: _process_if_nonempty(Author, x, y),
+    'authors': lambda x, y: ([
+        Author(author, y)
+        for author in ensure_metadata_list(x)
+    ] or _DISCARD),
+    'slug': lambda x, y: x.strip() or _DISCARD,
+}
+
+logger = logging.getLogger(__name__)
 
 
 def ensure_metadata_list(text):
     """Canonicalize the format of a list of authors or tags. This works
@@ -49,13 +75,6 @@ def ensure_metadata_list(text):
     return [v for v in (w.strip() for w in text) if v]
 
 
-# Metadata processors have no way to discard an unwanted value, so we have
-# them return this value instead to signal that it should be discarded later.
-# This means that _filter_discardable_metadata() must be called on processed
-# metadata dicts before use, to remove the items with the special value.
-_DISCARD = object()
-
-
 def _process_if_nonempty(processor, name, settings):
     """Removes extra whitespace from name and applies a metadata processor.
     If name is empty or all whitespace, returns _DISCARD instead.
@@ -64,28 +83,11 @@ def _process_if_nonempty(processor, name, settings):
     return processor(name, settings) if name else _DISCARD
 
 
-METADATA_PROCESSORS = {
-    'tags': lambda x, y: ([Tag(tag, y) for tag in ensure_metadata_list(x)]
-                          or _DISCARD),
-    'date': lambda x, y: get_date(x.replace('_', ' ')),
-    'modified': lambda x, y: get_date(x),
-    'status': lambda x, y: x.strip() or _DISCARD,
-    'category': lambda x, y: _process_if_nonempty(Category, x, y),
-    'author': lambda x, y: _process_if_nonempty(Author, x, y),
-    'authors': lambda x, y: ([Author(author, y)
-                              for author in ensure_metadata_list(x)]
-                             or _DISCARD),
-    'slug': lambda x, y: x.strip() or _DISCARD,
-}
-
-
 def _filter_discardable_metadata(metadata):
     """Return a copy of a dict, minus any items marked as discardable."""
     return {name: val for name, val in metadata.items() if val is not _DISCARD}
 
 
-logger = logging.getLogger(__name__)
-
-
 class BaseReader(object):
     """Base class to read files.
@@ -267,8 +269,10 @@ class MarkdownReader(BaseReader):
                     output[name] = self.process_metadata(name, summary)
                 elif name in METADATA_PROCESSORS:
                     if len(value) > 1:
-                        logger.warning('Duplicate definition of `%s` '
-                            'for %s. Using first one.', name, self._source_path)
+                        logger.warning(
+                            'Duplicate definition of `%s` '
+                            'for %s. Using first one.',
+                            name, self._source_path)
                     output[name] = self.process_metadata(name, value[0])
                 elif len(value) > 1:
                     # handle list metadata as list of string
@@ -380,7 +384,8 @@ class HTMLReader(BaseReader):
         def _handle_meta_tag(self, attrs):
             name = self._attr_value(attrs, 'name')
             if name is None:
-                attr_serialized = ', '.join(['{}="{}"'.format(k, v) for k, v in attrs])
+                attr_list = ['{}="{}"'.format(k, v) for k, v in attrs]
+                attr_serialized = ', '.join(attr_list)
                 logger.warning("Meta tag in file %s does not have a 'name' "
                                "attribute, skipping. Attributes: %s",
                                self._filename, attr_serialized)
@@ -394,9 +399,9 @@ class HTMLReader(BaseReader):
                     "Meta tag attribute 'contents' used in file %s, should"
                     " be changed to 'content'",
                     self._filename,
-                    extra={'limit_msg': ("Other files have meta tag "
-                                         "attribute 'contents' that should "
-                                         "be changed to 'content'")})
+                    extra={'limit_msg': "Other files have meta tag "
+                                        "attribute 'contents' that should "
+                                        "be changed to 'content'"})
 
             if name == 'keywords':
                 name = 'tags'
@@ -474,7 +479,8 @@ class Readers(FileStampDataCacher):
 
         path = os.path.abspath(os.path.join(base_path, path))
         source_path = posixize_path(os.path.relpath(path, base_path))
-        logger.debug('Read file %s -> %s',
+        logger.debug(
+            'Read file %s -> %s',
             source_path, content_class.__name__)
 
         if not fmt:
@@ -486,7 +492,8 @@ class Readers(FileStampDataCacher):
                 'Pelican does not know how to parse %s', path)
 
         if preread_signal:
-            logger.debug('Signal %s.send(%s)',
+            logger.debug(
+                'Signal %s.send(%s)',
                 preread_signal.name, preread_sender)
             preread_signal.send(preread_sender)
@@ -527,7 +534,9 @@ class Readers(FileStampDataCacher):
         def typogrify_wrapper(text):
             """Ensures ignore_tags feature is backward compatible"""
             try:
-                return typogrify(text, self.settings['TYPOGRIFY_IGNORE_TAGS'])
+                return typogrify(
+                    text,
+                    self.settings['TYPOGRIFY_IGNORE_TAGS'])
             except TypeError:
                 return typogrify(text)
@@ -539,8 +548,10 @@ class Readers(FileStampDataCacher):
             metadata['summary'] = typogrify_wrapper(metadata['summary'])
 
         if context_signal:
-            logger.debug('Signal %s.send(%s, <metadata>)',
-                         context_signal.name, context_sender)
+            logger.debug(
+                'Signal %s.send(%s, <metadata>)',
+                context_signal.name,
+                context_sender)
             context_signal.send(context_sender, metadata=metadata)
 
         return content_class(content=content, metadata=metadata,
@@ -591,7 +602,8 @@ def default_metadata(settings=None, process=None):
             if process:
                 value = process('category', value)
             metadata['category'] = value
-        if settings.get('DEFAULT_DATE', None) and settings['DEFAULT_DATE'] != 'fs':
+        if settings.get('DEFAULT_DATE', None) and \
+                settings['DEFAULT_DATE'] != 'fs':
             metadata['date'] = SafeDatetime(*settings['DEFAULT_DATE'])
     return metadata
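The larger move in this file, relocating METADATA_PROCESSORS above the helper functions it mentions, is safe because names inside a lambda are resolved when the lambda is called, not when the dict is built. A reduced demonstration of that late binding (names are illustrative, not from Pelican):

    PROCESSORS = {
        'tags': lambda x: [t.strip() for t in split_list(x)],  # defined below
    }

    def split_list(text):
        return text.split(',')

    # Resolved at call time, after split_list exists:
    print(PROCESSORS['tags']('python, pelican'))  # ['python', 'pelican']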
pelican/rstdirectives.py
@@ -1,13 +1,17 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals
+
+import re
 
 from docutils import nodes, utils
-from docutils.parsers.rst import directives, roles, Directive
-from pygments.formatters import HtmlFormatter
+from docutils.parsers.rst import Directive, directives, roles
+
 from pygments import highlight
-from pygments.lexers import get_lexer_by_name, TextLexer
-import re
+from pygments.formatters import HtmlFormatter
+from pygments.lexers import TextLexer, get_lexer_by_name
+
 import six
 
 import pelican.settings as pys
pelican/server.py
@@ -1,16 +1,18 @@
-from __future__ import print_function
+# -*- coding: utf-8 -*-
+from __future__ import print_function, unicode_literals
 
+import logging
 import os
 import sys
-import logging
 
+from six.moves import SimpleHTTPServer as srvmod
+from six.moves import socketserver
+
 try:
     from magic import from_file as magic_from_file
 except ImportError:
     magic_from_file = None
 
-from six.moves import SimpleHTTPServer as srvmod
-from six.moves import socketserver
-
 
 class ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler):
     SUFFIXES = ['', '.html', '/index.html']
@@ -54,12 +56,12 @@ if __name__ == '__main__':
 
     socketserver.TCPServer.allow_reuse_address = True
     try:
-        httpd = socketserver.TCPServer((SERVER, PORT), ComplexHTTPRequestHandler)
+        httpd = socketserver.TCPServer(
+            (SERVER, PORT), ComplexHTTPRequestHandler)
     except OSError as e:
         logging.error("Could not listen on port %s, server %s.", PORT, SERVER)
         sys.exit(getattr(e, 'exitcode', 1))
 
     logging.info("Serving at port %s, server %s.", PORT, SERVER)
     try:
         httpd.serve_forever()
pelican/settings.py
@@ -1,31 +1,32 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
-import six
+from __future__ import print_function, unicode_literals
 
 import copy
 import inspect
-import os
 import locale
 import logging
+import os
+from os.path import isabs
+from posixpath import join as posix_join
+
+import six
+
+from pelican.log import LimitFilter
 
 try:
     # SourceFileLoader is the recommended way in 3.3+
     from importlib.machinery import SourceFileLoader
-    load_source = lambda name, path: SourceFileLoader(name, path).load_module()
+
+    def load_source(name, path):
+        return SourceFileLoader(name, path).load_module()
 except ImportError:
     # but it does not exist in 3.2-, so fall back to imp
     import imp
     load_source = imp.load_source
 
-from os.path import isabs
-from pelican.utils import posix_join
-
-from pelican.log import LimitFilter
-
 
 logger = logging.getLogger(__name__)
 
 
 DEFAULT_THEME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              'themes', 'notmyidea')
 DEFAULT_CONFIG = {
@@ -131,7 +132,7 @@ DEFAULT_CONFIG = {
     'LOAD_CONTENT_CACHE': False,
     'WRITE_SELECTED': [],
     'FORMATTED_FIELDS': ['summary'],
-    }
+}
 
 PYGMENTS_RST_OPTIONS = None
@@ -158,8 +159,20 @@ def read_settings(path=None, override=None):
                                 "has been deprecated (should be a list)")
             local_settings['PLUGIN_PATHS'] = [local_settings['PLUGIN_PATHS']]
         elif local_settings['PLUGIN_PATHS'] is not None:
-            local_settings['PLUGIN_PATHS'] = [os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(path), pluginpath)))
-                                              if not isabs(pluginpath) else pluginpath for pluginpath in local_settings['PLUGIN_PATHS']]
+            def getabs(path, pluginpath):
+                if isabs(pluginpath):
+                    return pluginpath
+                else:
+                    path_dirname = os.path.dirname(path)
+                    path_joined = os.path.join(path_dirname, pluginpath)
+                    path_normed = os.path.normpath(path_joined)
+                    path_absolute = os.path.abspath(path_normed)
+                    return path_absolute
+
+            pluginpath_list = [getabs(path, pluginpath)
+                               for pluginpath
+                               in local_settings['PLUGIN_PATHS']]
+            local_settings['PLUGIN_PATHS'] = pluginpath_list
     else:
         local_settings = copy.deepcopy(DEFAULT_CONFIG)
@@ -199,13 +212,13 @@ def configure_settings(settings):
     settings.
     Also, specify the log messages to be ignored.
     """
-    if not 'PATH' in settings or not os.path.isdir(settings['PATH']):
+    if 'PATH' not in settings or not os.path.isdir(settings['PATH']):
         raise Exception('You need to specify a path containing the content'
                         ' (see pelican --help for more information)')
 
     # specify the log messages to be ignored
-    LimitFilter._ignore.update(set(settings.get('LOG_FILTER',
-                                                DEFAULT_CONFIG['LOG_FILTER'])))
+    log_filter = settings.get('LOG_FILTER', DEFAULT_CONFIG['LOG_FILTER'])
+    LimitFilter._ignore.update(set(log_filter))
 
     # lookup the theme in "pelican/themes" if the given one doesn't exist
     if not os.path.isdir(settings['THEME']):
@@ -223,19 +236,15 @@ def configure_settings(settings):
     settings['WRITE_SELECTED'] = [
         os.path.abspath(path) for path in
         settings.get('WRITE_SELECTED', DEFAULT_CONFIG['WRITE_SELECTED'])
-        ]
+    ]
 
     # standardize strings to lowercase strings
-    for key in [
-            'DEFAULT_LANG',
-            ]:
+    for key in ['DEFAULT_LANG']:
         if key in settings:
             settings[key] = settings[key].lower()
 
     # standardize strings to lists
-    for key in [
-            'LOCALE',
-            ]:
+    for key in ['LOCALE']:
         if key in settings and isinstance(settings[key], six.string_types):
             settings[key] = [settings[key]]
@@ -243,12 +252,13 @@ def configure_settings(settings):
     for key, types in [
             ('OUTPUT_SOURCES_EXTENSION', six.string_types),
             ('FILENAME_METADATA', six.string_types),
-            ]:
+    ]:
         if key in settings and not isinstance(settings[key], types):
             value = settings.pop(key)
-            logger.warn('Detected misconfigured %s (%s), '
-                        'falling back to the default (%s)',
-                        key, value, DEFAULT_CONFIG[key])
+            logger.warn(
+                'Detected misconfigured %s (%s), '
+                'falling back to the default (%s)',
+                key, value, DEFAULT_CONFIG[key])
 
     # try to set the different locales, fallback on the default.
     locales = settings.get('LOCALE', DEFAULT_CONFIG['LOCALE'])
@@ -270,16 +280,16 @@ def configure_settings(settings):
             logger.warning("Removed extraneous trailing slash from SITEURL.")
         # If SITEURL is defined but FEED_DOMAIN isn't,
         # set FEED_DOMAIN to SITEURL
-        if not 'FEED_DOMAIN' in settings:
+        if 'FEED_DOMAIN' not in settings:
             settings['FEED_DOMAIN'] = settings['SITEURL']
 
     # check content caching layer and warn of incompatibilities
-    if (settings.get('CACHE_CONTENT', False) and
-            settings.get('CONTENT_CACHING_LAYER', '') == 'generator' and
-            settings.get('WITH_FUTURE_DATES', DEFAULT_CONFIG['WITH_FUTURE_DATES'])):
-        logger.warning('WITH_FUTURE_DATES conflicts with '
-                       "CONTENT_CACHING_LAYER set to 'generator', "
-                       "use 'reader' layer instead")
+    if settings.get('CACHE_CONTENT', False) and \
+            settings.get('CONTENT_CACHING_LAYER', '') == 'generator' and \
+            settings.get('WITH_FUTURE_DATES', False):
+        logger.warning(
+            "WITH_FUTURE_DATES conflicts with CONTENT_CACHING_LAYER "
+            "set to 'generator', use 'reader' layer instead")
 
     # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined
     feed_keys = [
@@ -296,7 +306,7 @@ def configure_settings(settings):
         logger.warning('Feeds generated without SITEURL set properly may'
                        ' not be valid')
 
-    if not 'TIMEZONE' in settings:
+    if 'TIMEZONE' not in settings:
         logger.warning(
             'No timezone information specified in the settings. Assuming'
             ' your timezone is UTC for feed generation. Check '
@@ -321,7 +331,8 @@ def configure_settings(settings):
         old_key = key + '_DIR'
         new_key = key + '_PATHS'
         if old_key in settings:
-            logger.warning('Deprecated setting %s, moving it to %s list',
+            logger.warning(
+                'Deprecated setting %s, moving it to %s list',
                 old_key, new_key)
             settings[new_key] = [settings[old_key]]  # also make a list
             del settings[old_key]
@@ -365,8 +376,9 @@ def configure_settings(settings):
     for old, new, doc in [
             ('LESS_GENERATOR', 'the Webassets plugin', None),
             ('FILES_TO_COPY', 'STATIC_PATHS and EXTRA_PATH_METADATA',
-                'https://github.com/getpelican/pelican/blob/master/docs/settings.rst#path-metadata'),
-            ]:
+                'https://github.com/getpelican/pelican/'
+                'blob/master/docs/settings.rst#path-metadata'),
+    ]:
         if old in settings:
             message = 'The {} setting has been removed in favor of {}'.format(
                 old, new)
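Two changes in this file replace a lambda bound to a name with a def: `load_source` and the new `getabs` helper that unpacks the unreadable one-line comprehension. That is flake8's E731; a def gets a real __name__ and readable tracebacks. The load_source case, isolated:

    from importlib.machinery import SourceFileLoader

    # flagged by E731:
    # load_source = lambda name, path: SourceFileLoader(name, path).load_module()

    # preferred:
    def load_source(name, path):
        return SourceFileLoader(name, path).load_module()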
pelican/signals.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals
+
 from blinker import signal
 
 # Run-level signals:
samples/pelican.conf.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
+from __future__ import print_function, unicode_literals
 AUTHOR = 'Alexis Métaireau'
 SITENAME = "Alexis' log"
 SITEURL = 'http://blog.notmyidea.org'
@@ -31,17 +31,16 @@ DEFAULT_METADATA = {'yeah': 'it is'}
 # path-specific metadata
 EXTRA_PATH_METADATA = {
     'extra/robots.txt': {'path': 'robots.txt'},
-    }
+}
 
 # static paths will be copied without parsing their contents
 STATIC_PATHS = [
     'pictures',
     'extra/robots.txt',
-    ]
-
+]
 
 FORMATTED_FIELDS = ['summary', 'custom_formatted_field']
 
 # foobar will not be used, because it's not in caps. All configuration keys
 # have to be in caps
 foobar = "barbaz"
pelican/tests/support.py
@@ -1,25 +1,26 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
-__all__ = ['get_article', 'unittest', ]
+from __future__ import print_function, unicode_literals
 
+import locale
+import logging
 import os
 import re
 import subprocess
 import sys
-from six import StringIO
-import logging
-from logging.handlers import BufferingHandler
 import unittest
-import locale
 
-from functools import wraps
 from contextlib import contextmanager
-from tempfile import mkdtemp
+from functools import wraps
+from logging.handlers import BufferingHandler
 from shutil import rmtree
+from tempfile import mkdtemp
+
+from six import StringIO
 
 from pelican.contents import Article
 from pelican.settings import DEFAULT_CONFIG
 
+__all__ = ['get_article', 'unittest', ]
+
 
 @contextmanager
 def temporary_folder():
@@ -167,7 +168,7 @@ def get_settings(**kwargs):
     Set keyword arguments to override specific settings.
     """
     settings = DEFAULT_CONFIG.copy()
-    for key,value in kwargs.items():
+    for key, value in kwargs.items():
         settings[key] = value
     return settings
@@ -179,10 +180,13 @@ class LogCountHandler(BufferingHandler):
         logging.handlers.BufferingHandler.__init__(self, capacity)
 
     def count_logs(self, msg=None, level=None):
-        return len([l for l in self.buffer
-                    if (msg is None or re.match(msg, l.getMessage()))
-                    and (level is None or l.levelno == level)
-                    ])
+        return len([
+            l
+            for l
+            in self.buffer
+            if (msg is None or re.match(msg, l.getMessage())) and
+            (level is None or l.levelno == level)
+        ])
 
 
 class LoggedTestCase(unittest.TestCase):
pelican/tests/test_cache.py
@@ -1,7 +1,14 @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
 import os
 from codecs import open
+from shutil import rmtree
+from tempfile import mkdtemp
+
+from pelican.generators import ArticlesGenerator, PagesGenerator
+from pelican.tests.support import get_settings, unittest
+
 try:
     from unittest.mock import MagicMock
 except ImportError:
@@ -10,12 +17,6 @@ except ImportError:
     except ImportError:
         MagicMock = False
 
-from shutil import rmtree
-from tempfile import mkdtemp
-
-from pelican.generators import ArticlesGenerator, PagesGenerator
-from pelican.tests.support import unittest, get_settings
-
 CUR_DIR = os.path.dirname(__file__)
 CONTENT_DIR = os.path.join(CUR_DIR, 'content')
@@ -35,7 +36,6 @@ class TestCache(unittest.TestCase):
         settings['CACHE_PATH'] = self.temp_cache
         return settings
 
-
     @unittest.skipUnless(MagicMock, 'Needs Mock module')
     def test_article_object_caching(self):
         """Test Article objects caching at the generator level"""
@@ -44,7 +44,6 @@ class TestCache(unittest.TestCase):
         settings['DEFAULT_DATE'] = (1970, 1, 1)
         settings['READERS'] = {'asc': None}
 
-
         generator = ArticlesGenerator(
             context=settings.copy(), settings=settings,
             path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
@@ -108,7 +107,9 @@ class TestCache(unittest.TestCase):
             path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
         generator.readers.read_file = MagicMock()
         generator.generate_context()
-        self.assertEqual(generator.readers.read_file.call_count, orig_call_count)
+        self.assertEqual(
+            generator.readers.read_file.call_count,
+            orig_call_count)
 
     @unittest.skipUnless(MagicMock, 'Needs Mock module')
     def test_page_object_caching(self):
@@ -181,5 +182,6 @@ class TestCache(unittest.TestCase):
             path=CUR_DIR, theme=settings['THEME'], output_path=None)
         generator.readers.read_file = MagicMock()
         generator.generate_context()
-        self.assertEqual(generator.readers.read_file.call_count, orig_call_count)
+        self.assertEqual(
+            generator.readers.read_file.call_count,
+            orig_call_count)
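The reflowed assertions above lean on MagicMock's call bookkeeping: the tests replace Readers.read_file with a mock and assert that a warm cache causes no extra reads. The pattern in miniature, outside the test suite (argument and return values are illustrative):

    try:
        from unittest.mock import MagicMock  # Python 3.3+
    except ImportError:
        from mock import MagicMock  # backport used on Python 2

    read_file = MagicMock(return_value='parsed article')
    read_file('content/article.rst')
    assert read_file.call_count == 1  # a cache hit would leave this at 1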
pelican/tests/test_contents.py
@@ -1,20 +1,21 @@
-from __future__ import unicode_literals, absolute_import
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import, unicode_literals
 
-import logging
 import locale
+import logging
 import os.path
-import six
-
-from jinja2.utils import generate_lorem_ipsum
+from posixpath import join as posix_join
 from sys import platform
 
-from pelican.contents import (Page, Article, Static, URLWrapper,
-                              Author, Category)
+from jinja2.utils import generate_lorem_ipsum
+
+import six
+
+from pelican.contents import Article, Author, Category, Page, Static
 from pelican.settings import DEFAULT_CONFIG
 from pelican.signals import content_object_init
-from pelican.tests.support import LoggedTestCase, mute, unittest, get_settings
-from pelican.utils import (path_to_url, truncate_html_words, SafeDatetime,
-                           posix_join)
+from pelican.tests.support import LoggedTestCase, get_settings, unittest
+from pelican.utils import SafeDatetime, path_to_url, truncate_html_words
 
 
 # generate one paragraph, enclosed with <p>
@@ -49,7 +50,7 @@ class TestPage(unittest.TestCase):
         # them to initialise object's attributes.
         metadata = {'foo': 'bar', 'foobar': 'baz', 'title': 'foobar', }
         page = Page(TEST_CONTENT, metadata=metadata,
-                context={'localsiteurl': ''})
+                    context={'localsiteurl': ''})
         for key, value in metadata.items():
             self.assertTrue(hasattr(page, key))
             self.assertEqual(value, getattr(page, key))
@@ -139,14 +140,9 @@ class TestPage(unittest.TestCase):
         page = Page(**page_kwargs)
 
         # page.locale_date is a unicode string in both python2 and python3
-        dt_date = dt.strftime(DEFAULT_CONFIG['DEFAULT_DATE_FORMAT'])
-        # dt_date is a byte string in python2, and a unicode string in python3
-        # Let's make sure it is a unicode string (relies on python 3.3 supporting the u prefix)
-        if type(dt_date) != type(u''):
-            # python2:
-            dt_date = unicode(dt_date, 'utf8')
+        dt_date = dt.strftime(DEFAULT_CONFIG['DEFAULT_DATE_FORMAT'])
 
-        self.assertEqual(page.locale_date, dt_date )
+        self.assertEqual(page.locale_date, dt_date)
         page_kwargs['settings'] = get_settings()
 
         # I doubt this can work on all platforms ...
@@ -307,10 +303,14 @@ class TestPage(unittest.TestCase):
         args['settings'] = get_settings()
         args['source_path'] = 'content'
         args['context']['filenames'] = {
-            'images/poster.jpg': type(cls_name, (object,), {'url': 'images/poster.jpg'}),
-            'assets/video.mp4': type(cls_name, (object,), {'url': 'assets/video.mp4'}),
-            'images/graph.svg': type(cls_name, (object,), {'url': 'images/graph.svg'}),
-            'reference.rst': type(cls_name, (object,), {'url': 'reference.html'}),
+            'images/poster.jpg': type(
+                cls_name, (object,), {'url': 'images/poster.jpg'}),
+            'assets/video.mp4': type(
+                cls_name, (object,), {'url': 'assets/video.mp4'}),
+            'images/graph.svg': type(
+                cls_name, (object,), {'url': 'images/graph.svg'}),
+            'reference.rst': type(
+                cls_name, (object,), {'url': 'reference.html'}),
         }
 
         # video.poster
@@ -325,20 +325,25 @@ class TestPage(unittest.TestCase):
             content,
             'There is a video with poster '
             '<video controls poster="http://notmyidea.org/images/poster.jpg">'
-            '<source src="http://notmyidea.org/assets/video.mp4" type="video/mp4">'
+            '<source src="http://notmyidea.org/assets/video.mp4"'
+            ' type="video/mp4">'
             '</video>'
         )
 
         # object.data
         args['content'] = (
             'There is a svg object '
-            '<object data="{filename}/images/graph.svg" type="image/svg+xml"></object>'
+            '<object data="{filename}/images/graph.svg"'
+            ' type="image/svg+xml">'
+            '</object>'
         )
         content = Page(**args).get_content('http://notmyidea.org')
         self.assertEqual(
             content,
             'There is a svg object '
-            '<object data="http://notmyidea.org/images/graph.svg" type="image/svg+xml"></object>'
+            '<object data="http://notmyidea.org/images/graph.svg"'
+            ' type="image/svg+xml">'
+            '</object>'
         )
 
         # blockquote.cite
@@ -350,7 +355,9 @@ class TestPage(unittest.TestCase):
         self.assertEqual(
             content,
             'There is a blockquote with cite attribute '
-            '<blockquote cite="http://notmyidea.org/reference.html">blah blah</blockquote>'
+            '<blockquote cite="http://notmyidea.org/reference.html">'
+            'blah blah'
+            '</blockquote>'
         )
 
     def test_intrasite_link_markdown_spaces(self):
@@ -401,17 +408,19 @@ class TestArticle(TestPage):
 
     def test_slugify_category_author(self):
         settings = get_settings()
-        settings['SLUG_SUBSTITUTIONS'] = [ ('C#', 'csharp') ]
+        settings['SLUG_SUBSTITUTIONS'] = [('C#', 'csharp')]
         settings['ARTICLE_URL'] = '{author}/{category}/{slug}/'
         settings['ARTICLE_SAVE_AS'] = '{author}/{category}/{slug}/index.html'
         article_kwargs = self._copy_page_kwargs()
         article_kwargs['metadata']['author'] = Author("O'Brien", settings)
-        article_kwargs['metadata']['category'] = Category('C# & stuff', settings)
+        article_kwargs['metadata']['category'] = Category(
+            'C# & stuff', settings)
         article_kwargs['metadata']['title'] = 'fnord'
         article_kwargs['settings'] = settings
         article = Article(**article_kwargs)
         self.assertEqual(article.url, 'obrien/csharp-stuff/fnord/')
-        self.assertEqual(article.save_as, 'obrien/csharp-stuff/fnord/index.html')
+        self.assertEqual(
+            article.save_as, 'obrien/csharp-stuff/fnord/index.html')
 
 
 class TestStatic(LoggedTestCase):
@ -426,7 +435,8 @@ class TestStatic(LoggedTestCase):
|
|||
self.context = self.settings.copy()
|
||||
|
||||
self.static = Static(content=None, metadata={}, settings=self.settings,
|
||||
source_path=posix_join('dir', 'foo.jpg'), context=self.context)
|
||||
source_path=posix_join('dir', 'foo.jpg'),
|
||||
context=self.context)
|
||||
|
||||
self.context['filenames'] = {self.static.source_path: self.static}
|
||||
|
||||
|
|
@ -436,8 +446,10 @@ class TestStatic(LoggedTestCase):
|
|||
def test_attach_to_same_dir(self):
|
||||
"""attach_to() overrides a static file's save_as and url.
|
||||
"""
|
||||
page = Page(content="fake page",
|
||||
metadata={'title': 'fakepage'}, settings=self.settings,
|
||||
page = Page(
|
||||
content="fake page",
|
||||
metadata={'title': 'fakepage'},
|
||||
settings=self.settings,
|
||||
source_path=os.path.join('dir', 'fakepage.md'))
|
||||
self.static.attach_to(page)
|
||||
|
||||
|
|
@ -449,7 +461,7 @@ class TestStatic(LoggedTestCase):
|
|||
"""attach_to() preserves dirs inside the linking document dir.
|
||||
"""
|
||||
page = Page(content="fake page", metadata={'title': 'fakepage'},
|
||||
settings=self.settings, source_path='fakepage.md')
|
||||
settings=self.settings, source_path='fakepage.md')
|
||||
self.static.attach_to(page)
|
||||
|
||||
expected_save_as = os.path.join('outpages', 'dir', 'foo.jpg')
|
||||
|
|
@ -460,8 +472,8 @@ class TestStatic(LoggedTestCase):
|
|||
"""attach_to() ignores dirs outside the linking document dir.
|
||||
"""
|
||||
page = Page(content="fake page",
|
||||
metadata={'title': 'fakepage'}, settings=self.settings,
|
||||
source_path=os.path.join('dir', 'otherdir', 'fakepage.md'))
|
||||
metadata={'title': 'fakepage'}, settings=self.settings,
|
||||
source_path=os.path.join('dir', 'otherdir', 'fakepage.md'))
|
||||
self.static.attach_to(page)
|
||||
|
||||
expected_save_as = os.path.join('outpages', 'foo.jpg')
|
||||
|
|
@ -472,8 +484,8 @@ class TestStatic(LoggedTestCase):
|
|||
"""attach_to() does nothing when called a second time.
|
||||
"""
|
||||
page = Page(content="fake page",
|
||||
metadata={'title': 'fakepage'}, settings=self.settings,
|
||||
source_path=os.path.join('dir', 'fakepage.md'))
|
||||
metadata={'title': 'fakepage'}, settings=self.settings,
|
||||
source_path=os.path.join('dir', 'fakepage.md'))
|
||||
|
||||
self.static.attach_to(page)
|
||||
|
||||
|
|
@ -481,8 +493,10 @@ class TestStatic(LoggedTestCase):
|
|||
otherdir_settings.update(dict(
|
||||
PAGE_SAVE_AS=os.path.join('otherpages', '{slug}.html'),
|
||||
PAGE_URL='otherpages/{slug}.html'))
|
||||
otherdir_page = Page(content="other page",
|
||||
metadata={'title': 'otherpage'}, settings=otherdir_settings,
|
||||
otherdir_page = Page(
|
||||
content="other page",
|
||||
metadata={'title': 'otherpage'},
|
||||
settings=otherdir_settings,
|
||||
source_path=os.path.join('dir', 'otherpage.md'))
|
||||
|
||||
self.static.attach_to(otherdir_page)
|
||||
|
|
@ -497,8 +511,10 @@ class TestStatic(LoggedTestCase):
|
|||
"""
|
||||
original_save_as = self.static.save_as
|
||||
|
||||
page = Page(content="fake page",
|
||||
metadata={'title': 'fakepage'}, settings=self.settings,
|
||||
page = Page(
|
||||
content="fake page",
|
||||
metadata={'title': 'fakepage'},
|
||||
settings=self.settings,
|
||||
source_path=os.path.join('dir', 'fakepage.md'))
|
||||
self.static.attach_to(page)
|
||||
|
||||
|
|
@ -511,8 +527,10 @@ class TestStatic(LoggedTestCase):
|
|||
"""
|
||||
original_url = self.static.url
|
||||
|
||||
page = Page(content="fake page",
|
||||
metadata={'title': 'fakepage'}, settings=self.settings,
|
||||
page = Page(
|
||||
content="fake page",
|
||||
metadata={'title': 'fakepage'},
|
||||
settings=self.settings,
|
||||
source_path=os.path.join('dir', 'fakepage.md'))
|
||||
self.static.attach_to(page)
|
||||
|
||||
|
|
@ -523,13 +541,15 @@ class TestStatic(LoggedTestCase):
|
|||
"""attach_to() does not override paths that were overridden elsewhere.
|
||||
(For example, by the user with EXTRA_PATH_METADATA)
|
||||
"""
|
||||
customstatic = Static(content=None,
|
||||
customstatic = Static(
|
||||
content=None,
|
||||
metadata=dict(save_as='customfoo.jpg', url='customfoo.jpg'),
|
||||
settings=self.settings,
|
||||
source_path=os.path.join('dir', 'foo.jpg'),
|
||||
context=self.settings.copy())
|
||||
|
||||
page = Page(content="fake page",
|
||||
page = Page(
|
||||
content="fake page",
|
||||
metadata={'title': 'fakepage'}, settings=self.settings,
|
||||
source_path=os.path.join('dir', 'fakepage.md'))
|
||||
|
||||
|
|
@ -542,13 +562,16 @@ class TestStatic(LoggedTestCase):
|
|||
"""{attach} link syntax triggers output path override & url replacement.
|
||||
"""
|
||||
html = '<a href="{attach}../foo.jpg">link</a>'
|
||||
page = Page(content=html,
|
||||
metadata={'title': 'fakepage'}, settings=self.settings,
|
||||
page = Page(
|
||||
content=html,
|
||||
metadata={'title': 'fakepage'},
|
||||
settings=self.settings,
|
||||
source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
|
||||
context=self.context)
|
||||
content = page.get_content('')
|
||||
|
||||
self.assertNotEqual(content, html,
|
||||
self.assertNotEqual(
|
||||
content, html,
|
||||
"{attach} link syntax did not trigger URL replacement.")
|
||||
|
||||
expected_save_as = os.path.join('outpages', 'foo.jpg')
|
||||
|
|
@ -561,7 +584,8 @@ class TestStatic(LoggedTestCase):
|
|||
html = '<a href="{tag}foo">link</a>'
|
||||
page = Page(
|
||||
content=html,
|
||||
metadata={'title': 'fakepage'}, settings=self.settings,
|
||||
metadata={'title': 'fakepage'},
|
||||
settings=self.settings,
|
||||
source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
|
||||
context=self.context)
|
||||
content = page.get_content('')
|
||||
|
|
@ -572,8 +596,10 @@ class TestStatic(LoggedTestCase):
|
|||
"{category} link syntax triggers url replacement."
|
||||
|
||||
html = '<a href="{category}foo">link</a>'
|
||||
page = Page(content=html,
|
||||
metadata={'title': 'fakepage'}, settings=self.settings,
|
||||
page = Page(
|
||||
content=html,
|
||||
metadata={'title': 'fakepage'},
|
||||
settings=self.settings,
|
||||
source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
|
||||
context=self.context)
|
||||
content = page.get_content('')
|
||||
|
|
@ -588,11 +614,11 @@ class TestStatic(LoggedTestCase):
|
|||
metadata={'title': 'fakepage'}, settings=self.settings,
|
||||
source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
|
||||
context=self.context)
|
||||
content = page.get_content('')
|
||||
content = page.get_content('')
|
||||
|
||||
self.assertEqual(content, html)
|
||||
self.assertLogCountEqual(
|
||||
count=1,
|
||||
msg="Replacement Indicator 'unknown' not recognized, "
|
||||
"skipping replacement",
|
||||
level=logging.WARNING)
|
||||
count=1,
|
||||
msg="Replacement Indicator 'unknown' not recognized, "
|
||||
"skipping replacement",
|
||||
level=logging.WARNING)
|
||||
|
|
|
|||
|
|
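Most of the edits in this file resolve flake8's E501 (line too long) by breaking a call across lines. As a minimal sketch of the two continuation styles PEP 8 allows, using a stand-in function rather than the real Page constructor:

    import os

    def page(**kwargs):          # illustrative stand-in for a constructor
        return kwargs

    # Hanging indent: nothing after the opening parenthesis;
    # every argument sits one indent level in.
    p = page(
        content="fake page",
        metadata={'title': 'fakepage'},
        source_path=os.path.join('dir', 'fakepage.md'))

    # Aligned continuation: later lines line up under the first argument.
    p = page(content="fake page",
             metadata={'title': 'fakepage'},
             source_path=os.path.join('dir', 'fakepage.md'))

Both forms satisfy the checker; the hunks above pick whichever keeps the call under 79 columns with the least churn.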
diff --git a/pelican/tests/test_generators.py b/pelican/tests/test_generators.py
@@ -1,8 +1,18 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import locale
import os

from codecs import open
from shutil import rmtree
from tempfile import mkdtemp

from pelican.generators import (ArticlesGenerator, Generator, PagesGenerator,
StaticGenerator, TemplatePagesGenerator)
from pelican.tests.support import get_settings, unittest
from pelican.writers import Writer

try:
from unittest.mock import MagicMock
except ImportError:

@@ -10,14 +20,7 @@ except ImportError:
from mock import MagicMock
except ImportError:
MagicMock = False
from shutil import rmtree
from tempfile import mkdtemp

from pelican.generators import (Generator, ArticlesGenerator, PagesGenerator,
StaticGenerator, TemplatePagesGenerator)
from pelican.writers import Writer
from pelican.tests.support import unittest, get_settings
import locale

CUR_DIR = os.path.dirname(__file__)
CONTENT_DIR = os.path.join(CUR_DIR, 'content')

@@ -35,7 +38,6 @@ class TestGenerator(unittest.TestCase):
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.old_locale)


def test_include_path(self):
self.settings['IGNORE_FILES'] = {'ignored1.rst', 'ignored2.rst'}

@@ -52,7 +54,8 @@ class TestGenerator(unittest.TestCase):
"""Test that Generator.get_files() properly excludes directories.
"""
# We use our own Generator so we can give it our own content path
generator = Generator(context=self.settings.copy(),
generator = Generator(
context=self.settings.copy(),
settings=self.settings,
path=os.path.join(CUR_DIR, 'nested_content'),
theme=self.settings['THEME'], output_path=None)

@@ -60,34 +63,42 @@ class TestGenerator(unittest.TestCase):
filepaths = generator.get_files(paths=['maindir'])
found_files = {os.path.basename(f) for f in filepaths}
expected_files = {'maindir.md', 'subdir.md'}
self.assertFalse(expected_files - found_files,
self.assertFalse(
expected_files - found_files,
"get_files() failed to find one or more files")

# Test string as `paths` argument rather than list
filepaths = generator.get_files(paths='maindir')
found_files = {os.path.basename(f) for f in filepaths}
expected_files = {'maindir.md', 'subdir.md'}
self.assertFalse(expected_files - found_files,
self.assertFalse(
expected_files - found_files,
"get_files() failed to find one or more files")

filepaths = generator.get_files(paths=[''], exclude=['maindir'])
found_files = {os.path.basename(f) for f in filepaths}
self.assertNotIn('maindir.md', found_files,
self.assertNotIn(
'maindir.md', found_files,
"get_files() failed to exclude a top-level directory")
self.assertNotIn('subdir.md', found_files,
self.assertNotIn(
'subdir.md', found_files,
"get_files() failed to exclude a subdir of an excluded directory")

filepaths = generator.get_files(paths=[''],
filepaths = generator.get_files(
paths=[''],
exclude=[os.path.join('maindir', 'subdir')])
found_files = {os.path.basename(f) for f in filepaths}
self.assertNotIn('subdir.md', found_files,
self.assertNotIn(
'subdir.md', found_files,
"get_files() failed to exclude a subdirectory")

filepaths = generator.get_files(paths=[''], exclude=['subdir'])
found_files = {os.path.basename(f) for f in filepaths}
self.assertIn('subdir.md', found_files,
self.assertIn(
'subdir.md', found_files,
"get_files() excluded a subdirectory by name, ignoring its path")


class TestArticlesGenerator(unittest.TestCase):

@classmethod

@@ -96,7 +107,7 @@ class TestArticlesGenerator(unittest.TestCase):
settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['READERS'] = {'asc': None}
settings['CACHE_CONTENT'] = False  # cache not needed for this logic tests
settings['CACHE_CONTENT'] = False

cls.generator = ArticlesGenerator(
context=settings.copy(), settings=settings,

@@ -152,25 +163,30 @@ class TestArticlesGenerator(unittest.TestCase):
['Test mkd File', 'published', 'test', 'article'],
['This is a super article !', 'published', 'Yeah', 'article'],
['This is a super article !', 'published', 'Yeah', 'article'],
['Article with Nonconformant HTML meta tags', 'published', 'Default', 'article'],
['Article with Nonconformant HTML meta tags', 'published',
'Default', 'article'],
['This is a super article !', 'published', 'yeah', 'article'],
['This is a super article !', 'published', 'yeah', 'article'],
['This is a super article !', 'published', 'yeah', 'article'],
['This is a super article !', 'published', 'Default', 'article'],
['This is an article with category !', 'published', 'yeah',
'article'],
['This is an article with multiple authors!', 'published', 'Default', 'article'],
['This is an article with multiple authors!', 'published', 'Default', 'article'],
['This is an article with multiple authors in list format!', 'published', 'Default', 'article'],
['This is an article with multiple authors in lastname, firstname format!', 'published', 'Default', 'article'],
['This is an article with multiple authors!', 'published',
'Default', 'article'],
['This is an article with multiple authors!', 'published',
'Default', 'article'],
['This is an article with multiple authors in list format!',
'published', 'Default', 'article'],
['This is an article with multiple authors in lastname, '
'firstname format!', 'published', 'Default', 'article'],
['This is an article without category !', 'published', 'Default',
'article'],
'article'],
['This is an article without category !', 'published',
'TestCategory', 'article'],
['An Article With Code Block To Test Typogrify Ignore',
'published', 'Default', 'article'],
['マックOS X 10.8でパイソンとVirtualenvをインストールと設定', 'published',
'指導書', 'article'],
'published', 'Default', 'article'],
['マックOS X 10.8でパイソンとVirtualenvをインストールと設定',
'published', '指導書', 'article'],
]
self.assertEqual(sorted(articles_expected), sorted(self.articles))

@@ -292,7 +308,7 @@ class TestArticlesGenerator(unittest.TestCase):
generator.generate_period_archives(write)
dates = [d for d in generator.dates if d.date.year == 1970]
self.assertEqual(len(dates), 1)
#among other things it must have at least been called with this
# among other things it must have at least been called with this
settings["period"] = (1970,)
write.assert_called_with("posts/1970/index.html",
generator.get_template("period_archives"),

@@ -300,37 +316,42 @@ class TestArticlesGenerator(unittest.TestCase):
blog=True, dates=dates)

del settings["period"]
settings['MONTH_ARCHIVE_SAVE_AS'] = 'posts/{date:%Y}/{date:%b}/index.html'
settings['MONTH_ARCHIVE_SAVE_AS'] = \
'posts/{date:%Y}/{date:%b}/index.html'
generator = ArticlesGenerator(
context=settings, settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
write = MagicMock()
generator.generate_period_archives(write)
dates = [d for d in generator.dates if d.date.year == 1970
and d.date.month == 1]
dates = [d for d in generator.dates
if d.date.year == 1970 and d.date.month == 1]
self.assertEqual(len(dates), 1)
settings["period"] = (1970, "January")
#among other things it must have at least been called with this
# among other things it must have at least been called with this
write.assert_called_with("posts/1970/Jan/index.html",
generator.get_template("period_archives"),
settings,
blog=True, dates=dates)

del settings["period"]
settings['DAY_ARCHIVE_SAVE_AS'] = 'posts/{date:%Y}/{date:%b}/{date:%d}/index.html'
settings['DAY_ARCHIVE_SAVE_AS'] = \
'posts/{date:%Y}/{date:%b}/{date:%d}/index.html'
generator = ArticlesGenerator(
context=settings, settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
write = MagicMock()
generator.generate_period_archives(write)
dates = [d for d in generator.dates if d.date.year == 1970
and d.date.month == 1
and d.date.day == 1]
dates = [
d for d in generator.dates if
d.date.year == 1970 and
d.date.month == 1 and
d.date.day == 1
]
self.assertEqual(len(dates), 1)
settings["period"] = (1970, "January", 1)
#among other things it must have at least been called with this
# among other things it must have at least been called with this
write.assert_called_with("posts/1970/Jan/01/index.html",
generator.get_template("period_archives"),
settings,

@@ -347,11 +368,14 @@ class TestArticlesGenerator(unittest.TestCase):
def test_generate_authors(self):
"""Check authors generation."""
authors = [author.name for author, _ in self.generator.authors]
authors_expected = sorted(['Alexis Métaireau', 'Author, First', 'Author, Second', 'First Author', 'Second Author'])
authors_expected = sorted(
['Alexis Métaireau', 'Author, First', 'Author, Second',
'First Author', 'Second Author'])
self.assertEqual(sorted(authors), authors_expected)
# test for slug
authors = [author.slug for author, _ in self.generator.authors]
authors_expected = ['alexis-metaireau', 'author-first', 'author-second', 'first-author', 'second-author']
authors_expected = ['alexis-metaireau', 'author-first',
'author-second', 'first-author', 'second-author']
self.assertEqual(sorted(authors), sorted(authors_expected))

def test_standard_metadata_in_default_metadata(self):

@@ -391,7 +415,6 @@ class TestArticlesGenerator(unittest.TestCase):
settings = get_settings(filenames={})
settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['CACHE_CONTENT'] = False  # cache not needed for this logic tests
settings['ARTICLE_ORDER_BY'] = 'title'

generator = ArticlesGenerator(

@@ -420,7 +443,8 @@ class TestArticlesGenerator(unittest.TestCase):
'This is a super article !',
'This is a super article !',
'This is an article with category !',
'This is an article with multiple authors in lastname, firstname format!',
('This is an article with multiple authors in lastname, '
'firstname format!'),
'This is an article with multiple authors in list format!',
'This is an article with multiple authors!',
'This is an article with multiple authors!',

@@ -435,7 +459,6 @@ class TestArticlesGenerator(unittest.TestCase):
settings = get_settings(filenames={})
settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['CACHE_CONTENT'] = False  # cache not needed for this logic tests
settings['ARTICLE_ORDER_BY'] = 'reversed-title'

generator = ArticlesGenerator(

@@ -561,7 +584,7 @@ class TestPageGenerator(unittest.TestCase):
are generated correctly on pages
"""
settings = get_settings(filenames={})
settings['PAGE_PATHS'] = ['TestPages'] # relative to CUR_DIR
settings['PAGE_PATHS'] = ['TestPages']  # relative to CUR_DIR
settings['CACHE_PATH'] = self.temp_cache
settings['DEFAULT_DATE'] = (1970, 1, 1)

@@ -586,7 +609,6 @@ class TestTemplatePagesGenerator(unittest.TestCase):
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, str('C'))


def tearDown(self):
rmtree(self.temp_content)
rmtree(self.temp_output)

@@ -632,59 +654,67 @@ class TestStaticGenerator(unittest.TestCase):
def test_static_excludes(self):
"""Test that StaticGenerator respects STATIC_EXCLUDES.
"""
settings = get_settings(STATIC_EXCLUDES=['subdir'],
PATH=self.content_path, STATIC_PATHS=[''])
settings = get_settings(
STATIC_EXCLUDES=['subdir'],
PATH=self.content_path,
STATIC_PATHS=[''],
filenames={})
context = settings.copy()
context['filenames'] = {}

StaticGenerator(context=context, settings=settings,
StaticGenerator(
context=context, settings=settings,
path=settings['PATH'], output_path=None,
theme=settings['THEME']).generate_context()

staticnames = [os.path.basename(c.source_path)
for c in context['staticfiles']]
for c in context['staticfiles']]

self.assertNotIn('subdir_fake_image.jpg', staticnames,
self.assertNotIn(
'subdir_fake_image.jpg', staticnames,
"StaticGenerator processed a file in a STATIC_EXCLUDES directory")
self.assertIn('fake_image.jpg', staticnames,
self.assertIn(
'fake_image.jpg', staticnames,
"StaticGenerator skipped a file that it should have included")

def test_static_exclude_sources(self):
"""Test that StaticGenerator respects STATIC_EXCLUDE_SOURCES.
"""
# Test STATIC_EXCLUDE_SOURCES=True

settings = get_settings(STATIC_EXCLUDE_SOURCES=True,
PATH=self.content_path, PAGE_PATHS=[''], STATIC_PATHS=[''],
CACHE_CONTENT=False)
settings = get_settings(
STATIC_EXCLUDE_SOURCES=True,
PATH=self.content_path,
PAGE_PATHS=[''],
STATIC_PATHS=[''],
CACHE_CONTENT=False,
filenames={})
context = settings.copy()
context['filenames'] = {}

for generator_class in (PagesGenerator, StaticGenerator):
generator_class(context=context, settings=settings,
generator_class(
context=context, settings=settings,
path=settings['PATH'], output_path=None,
theme=settings['THEME']).generate_context()

staticnames = [os.path.basename(c.source_path)
for c in context['staticfiles']]
for c in context['staticfiles']]

self.assertFalse(any(name.endswith(".md") for name in staticnames),
self.assertFalse(
any(name.endswith(".md") for name in staticnames),
"STATIC_EXCLUDE_SOURCES=True failed to exclude a markdown file")

# Test STATIC_EXCLUDE_SOURCES=False

settings.update(STATIC_EXCLUDE_SOURCES=False)
context = settings.copy()
context['filenames'] = {}

for generator_class in (PagesGenerator, StaticGenerator):
generator_class(context=context, settings=settings,
generator_class(
context=context, settings=settings,
path=settings['PATH'], output_path=None,
theme=settings['THEME']).generate_context()

staticnames = [os.path.basename(c.source_path)
for c in context['staticfiles']]
for c in context['staticfiles']]

self.assertTrue(any(name.endswith(".md") for name in staticnames),
self.assertTrue(
any(name.endswith(".md") for name in staticnames),
"STATIC_EXCLUDE_SOURCES=False failed to include a markdown file")
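Two wrapping techniques recur in this file: a backslash continuation for a long assignment, and splitting a comprehension's filter so each boolean clause reads on its own line with `and` at the line end. A small runnable sketch under made-up data:

    from datetime import date

    records = [date(1970, 1, 1), date(1970, 2, 1), date(1971, 1, 1)]

    # Backslash continuation keeps a long assignment under 79 columns.
    MONTH_ARCHIVE_SAVE_AS = \
        'posts/{date:%Y}/{date:%b}/index.html'

    # Each clause of the comprehension's condition gets its own line.
    matches = [
        d for d in records if
        d.year == 1970 and
        d.month == 1
    ]
    assert len(matches) == 1

Parenthesized continuations are generally preferred over the backslash, but for a plain assignment of one long string the backslash form is the smaller diff.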
diff --git a/pelican/tests/test_importer.py b/pelican/tests/test_importer.py
@@ -1,16 +1,19 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals

import locale
import os
import re

import locale
from codecs import open
from pelican.tools.pelican_import import wp2fields, fields2pelican, decode_wp_content, build_header, build_markdown_header, get_attachments, download_attachments
from pelican.tests.support import (unittest, temporary_folder, mute,
skipIfNoExecutable)

from pelican.utils import slugify, path_to_file_url
from pelican.tests.support import (mute, skipIfNoExecutable, temporary_folder,
unittest)
from pelican.tools.pelican_import import (build_header, build_markdown_header,
decode_wp_content,
download_attachments, fields2pelican,
get_attachments, wp2fields)
from pelican.utils import path_to_file_url, slugify

CUR_DIR = os.path.abspath(os.path.dirname(__file__))
WORDPRESS_XML_SAMPLE = os.path.join(CUR_DIR, 'content', 'wordpressexport.xml')

@@ -32,7 +35,6 @@ except ImportError:
LXML = False



@skipIfNoExecutable(['pandoc', '--version'])
@unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module')
class TestWordpressXmlImporter(unittest.TestCase):

@@ -48,17 +50,19 @@ class TestWordpressXmlImporter(unittest.TestCase):

def test_ignore_empty_posts(self):
self.assertTrue(self.posts)
for title, content, fname, date, author, categ, tags, status, kind, format in self.posts:
self.assertTrue(title.strip())
for (title, content, fname, date, author,
categ, tags, status, kind, format) in self.posts:
self.assertTrue(title.strip())

def test_recognise_page_kind(self):
""" Check that we recognise pages in wordpress, as opposed to posts """
self.assertTrue(self.posts)
# Collect (title, filename, kind) of non-empty posts recognised as page
pages_data = []
for title, content, fname, date, author, categ, tags, status, kind, format in self.posts:
if kind == 'page':
pages_data.append((title, fname))
for (title, content, fname, date, author,
categ, tags, status, kind, format) in self.posts:
if kind == 'page':
pages_data.append((title, fname))
self.assertEqual(2, len(pages_data))
self.assertEqual(('Page', 'contact'), pages_data[0])
self.assertEqual(('Empty Page', 'empty'), pages_data[1])

@@ -67,7 +71,8 @@ class TestWordpressXmlImporter(unittest.TestCase):
silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Empty Page"), self.posts)
with temporary_folder() as temp:
fname = list(silent_f2p(test_post, 'markdown', temp, dirpage=True))[0]
fname = list(silent_f2p(test_post, 'markdown',
temp, dirpage=True))[0]
self.assertTrue(fname.endswith('pages%sempty.md' % os.path.sep))

def test_dircat(self):

@@ -75,10 +80,11 @@ class TestWordpressXmlImporter(unittest.TestCase):
test_posts = []
for post in self.posts:
# check post kind
if len(post[5]) > 0: # Has a category
if len(post[5]) > 0:  # Has a category
test_posts.append(post)
with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, 'markdown', temp, dircat=True))
fnames = list(silent_f2p(test_posts, 'markdown',
temp, dircat=True))
index = 0
for post in test_posts:
name = post[2]

@@ -92,25 +98,33 @@ class TestWordpressXmlImporter(unittest.TestCase):
def test_unless_custom_post_all_items_should_be_pages_or_posts(self):
self.assertTrue(self.posts)
pages_data = []
for title, content, fname, date, author, categ, tags, status, kind, format in self.posts:
if kind == 'page' or kind == 'article':
pass
else:
pages_data.append((title, fname))
for (title, content, fname, date, author, categ,
tags, status, kind, format) in self.posts:
if kind == 'page' or kind == 'article':
pass
else:
pages_data.append((title, fname))
self.assertEqual(0, len(pages_data))

def test_recognise_custom_post_type(self):
self.assertTrue(self.custposts)
cust_data = []
for title, content, fname, date, author, categ, tags, status, kind, format in self.custposts:
if kind == 'article' or kind == 'page':
pass
else:
cust_data.append((title, kind))
for (title, content, fname, date, author, categ,
tags, status, kind, format) in self.custposts:
if kind == 'article' or kind == 'page':
pass
else:
cust_data.append((title, kind))
self.assertEqual(3, len(cust_data))
self.assertEqual(('A custom post in category 4', 'custom1'), cust_data[0])
self.assertEqual(('A custom post in category 5', 'custom1'), cust_data[1])
self.assertEqual(('A 2nd custom post type also in category 5', 'custom2'), cust_data[2])
self.assertEqual(
('A custom post in category 4', 'custom1'),
cust_data[0])
self.assertEqual(
('A custom post in category 5', 'custom1'),
cust_data[1])
self.assertEqual(
('A 2nd custom post type also in category 5', 'custom2'),
cust_data[2])

def test_custom_posts_put_in_own_dir(self):
silent_f2p = mute(True)(fields2pelican)

@@ -122,7 +136,8 @@ class TestWordpressXmlImporter(unittest.TestCase):
else:
test_posts.append(post)
with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, 'markdown', temp, wp_custpost = True))
fnames = list(silent_f2p(test_posts, 'markdown',
temp, wp_custpost=True))
index = 0
for post in test_posts:
name = post[2]

@@ -144,7 +159,7 @@ class TestWordpressXmlImporter(unittest.TestCase):
test_posts.append(post)
with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, 'markdown', temp,
wp_custpost=True, dircat=True))
wp_custpost=True, dircat=True))
index = 0
for post in test_posts:
name = post[2]

@@ -157,7 +172,7 @@ class TestWordpressXmlImporter(unittest.TestCase):
index += 1

def test_wp_custpost_true_dirpage_false(self):
#pages should only be put in their own directory when dirpage = True
# pages should only be put in their own directory when dirpage = True
silent_f2p = mute(True)(fields2pelican)
test_posts = []
for post in self.custposts:

@@ -166,7 +181,7 @@ class TestWordpressXmlImporter(unittest.TestCase):
test_posts.append(post)
with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, 'markdown', temp,
wp_custpost=True, dirpage=False))
wp_custpost=True, dirpage=False))
index = 0
for post in test_posts:
name = post[2]

@@ -175,7 +190,6 @@ class TestWordpressXmlImporter(unittest.TestCase):
out_name = fnames[index]
self.assertFalse(out_name.endswith(filename))


def test_can_toggle_raw_html_code_parsing(self):
def r(f):
with open(f, encoding='utf-8') as infile:

@@ -184,10 +198,12 @@ class TestWordpressXmlImporter(unittest.TestCase):

with temporary_folder() as temp:

rst_files = (r(f) for f in silent_f2p(self.posts, 'markdown', temp))
rst_files = (r(f) for f
in silent_f2p(self.posts, 'markdown', temp))
self.assertTrue(any('<iframe' in rst for rst in rst_files))
rst_files = (r(f) for f in silent_f2p(self.posts, 'markdown', temp,
strip_raw=True))
rst_files = (r(f) for f
in silent_f2p(self.posts, 'markdown',
temp, strip_raw=True))
self.assertFalse(any('<iframe' in rst for rst in rst_files))
# no effect in rst
rst_files = (r(f) for f in silent_f2p(self.posts, 'rst', temp))

@@ -197,13 +213,14 @@ class TestWordpressXmlImporter(unittest.TestCase):
self.assertFalse(any('<iframe' in rst for rst in rst_files))

def test_decode_html_entities_in_titles(self):
test_posts = [post for post in self.posts if post[2] == 'html-entity-test']
test_posts = [post for post
in self.posts if post[2] == 'html-entity-test']
self.assertEqual(len(test_posts), 1)

post = test_posts[0]
title = post[0]
self.assertTrue(title, "A normal post with some <html> entities in the"
" title. You can't miss them.")
self.assertTrue(title, "A normal post with some <html> entities in "
"the title. You can't miss them.")
self.assertNotIn('&amp;', title)

def test_decode_wp_content_returns_empty(self):

@@ -216,14 +233,18 @@ class TestWordpressXmlImporter(unittest.TestCase):
encoded_content = encoded_file.read()
with open(WORDPRESS_DECODED_CONTENT_SAMPLE, 'r') as decoded_file:
decoded_content = decoded_file.read()
self.assertEqual(decode_wp_content(encoded_content, br=False), decoded_content)
self.assertEqual(
decode_wp_content(encoded_content, br=False),
decoded_content)

def test_preserve_verbatim_formatting(self):
def r(f):
with open(f, encoding='utf-8') as infile:
return infile.read()
silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Code in List"), self.posts)
test_post = filter(
lambda p: p[0].startswith("Code in List"),
self.posts)
with temporary_folder() as temp:
md = [r(f) for f in silent_f2p(test_post, 'markdown', temp)][0]
self.assertTrue(re.search(r'\s+a = \[1, 2, 3\]', md))

@@ -231,14 +252,17 @@ class TestWordpressXmlImporter(unittest.TestCase):

for_line = re.search(r'\s+for i in zip\(a, b\):', md).group(0)
print_line = re.search(r'\s+print i', md).group(0)
self.assertTrue(for_line.rindex('for') < print_line.rindex('print'))
self.assertTrue(
for_line.rindex('for') < print_line.rindex('print'))

def test_code_in_list(self):
def r(f):
with open(f, encoding='utf-8') as infile:
return infile.read()
silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Code in List"), self.posts)
test_post = filter(
lambda p: p[0].startswith("Code in List"),
self.posts)
with temporary_folder() as temp:
md = [r(f) for f in silent_f2p(test_post, 'markdown', temp)][0]
sample_line = re.search(r'- This is a code sample', md).group(0)

@@ -285,26 +309,29 @@ class TestBuildHeader(unittest.TestCase):
self.assertEqual(build_header(*header_data), expected_docutils)
self.assertEqual(build_markdown_header(*header_data), expected_md)


def test_build_header_with_east_asian_characters(self):
header = build_header('これは広い幅の文字だけで構成されたタイトルです',
None, None, None, None, None)
None, None, None, None, None)

self.assertEqual(header,
'これは広い幅の文字だけで構成されたタイトルです\n' +
'##############################################\n\n')
('これは広い幅の文字だけで構成されたタイトルです\n'
'##############################################'
'\n\n'))

def test_galleries_added_to_header(self):
header = build_header('test', None, None, None, None,
None, attachments=['output/test1', 'output/test2'])
self.assertEqual(header, 'test\n####\n' + ':attachments: output/test1, '
+ 'output/test2\n\n')
header = build_header('test', None, None, None, None, None,
attachments=['output/test1', 'output/test2'])
self.assertEqual(header, ('test\n####\n'
':attachments: output/test1, '
'output/test2\n\n'))

def test_galleries_added_to_markdown_header(self):
header = build_markdown_header('test', None, None, None, None, None,
attachments=['output/test1', 'output/test2'])
self.assertEqual(header, 'Title: test\n' + 'Attachments: output/test1, '
+ 'output/test2\n\n')
attachments=['output/test1',
'output/test2'])
self.assertEqual(
header,
'Title: test\nAttachments: output/test1, output/test2\n\n')


@unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module')

@@ -326,14 +353,24 @@ class TestWordpressXMLAttachements(unittest.TestCase):
self.assertTrue(self.attachments)
for post in self.attachments.keys():
if post is None:
self.assertTrue(self.attachments[post][0] == 'https://upload.wikimedia.org/wikipedia/commons/thumb/2/2c/Pelican_lakes_entrance02.jpg/240px-Pelican_lakes_entrance02.jpg')
expected = ('https://upload.wikimedia.org/wikipedia/commons/'
'thumb/2/2c/Pelican_lakes_entrance02.jpg/'
'240px-Pelican_lakes_entrance02.jpg')
self.assertEqual(self.attachments[post][0], expected)
elif post == 'with-excerpt':
self.assertTrue(self.attachments[post][0] == 'http://thisurlisinvalid.notarealdomain/not_an_image.jpg')
self.assertTrue(self.attachments[post][1] == 'http://en.wikipedia.org/wiki/File:Pelikan_Walvis_Bay.jpg')
expected_invalid = ('http://thisurlisinvalid.notarealdomain/'
'not_an_image.jpg')
expected_pelikan = ('http://en.wikipedia.org/wiki/'
'File:Pelikan_Walvis_Bay.jpg')
self.assertEqual(self.attachments[post][0], expected_invalid)
self.assertEqual(self.attachments[post][1], expected_pelikan)
elif post == 'with-tags':
self.assertTrue(self.attachments[post][0] == 'http://thisurlisinvalid.notarealdomain')
expected_invalid = ('http://thisurlisinvalid.notarealdomain')
self.assertEqual(self.attachments[post][0], expected_invalid)
else:
self.fail('all attachments should match to a filename or None, {}'.format(post))
self.fail('all attachments should match to a '
'filename or None, {}'
.format(post))

def test_download_attachments(self):
real_file = os.path.join(CUR_DIR, 'content/article.rst')

@@ -344,4 +381,6 @@ class TestWordpressXMLAttachements(unittest.TestCase):
locations = list(silent_da(temp, [good_url, bad_url]))
self.assertEqual(1, len(locations))
directory = locations[0]
self.assertTrue(directory.endswith(os.path.join('content', 'article.rst')), directory)
self.assertTrue(
directory.endswith(os.path.join('content', 'article.rst')),
directory)
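Several hunks in this file replace `+` between string literals with implicit concatenation: adjacent literals inside parentheses are fused by the parser at compile time, so long messages can wrap across lines at no runtime cost. A self-contained sketch:

    # Adjacent string literals are joined by the compiler, so both
    # expressions below build exactly the same string.
    joined_with_plus = 'Title: test\n' + 'Attachments: output/test1, ' \
        + 'output/test2\n\n'
    joined_implicitly = ('Title: test\n'
                         'Attachments: output/test1, '
                         'output/test2\n\n')
    assert joined_with_plus == joined_implicitly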
diff --git a/pelican/tests/test_paginator.py b/pelican/tests/test_paginator.py
@@ -1,18 +1,21 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from __future__ import absolute_import, unicode_literals

import locale

from pelican.tests.support import unittest, get_settings

from pelican.paginator import Paginator
from pelican.contents import Article, Author
from pelican.settings import DEFAULT_CONFIG
from jinja2.utils import generate_lorem_ipsum

from pelican.contents import Article, Author
from pelican.paginator import Paginator
from pelican.settings import DEFAULT_CONFIG
from pelican.tests.support import get_settings, unittest


# generate one paragraph, enclosed with <p>
TEST_CONTENT = str(generate_lorem_ipsum(n=1))
TEST_SUMMARY = generate_lorem_ipsum(n=1, html=False)


class TestPage(unittest.TestCase):
def setUp(self):
super(TestPage, self).setUp()

@@ -49,7 +52,8 @@ class TestPage(unittest.TestCase):
)

self.page_kwargs['metadata']['author'] = Author('Blogger', settings)
object_list = [Article(**self.page_kwargs), Article(**self.page_kwargs)]
object_list = [Article(**self.page_kwargs),
Article(**self.page_kwargs)]
paginator = Paginator('foobar.foo', object_list, settings)
page = paginator.page(1)
self.assertEqual(page.save_as, 'foobar.foo')
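The import blocks in these files are being normalised to PEP 8's recommended order: standard library first, then third-party packages, then first-party packages, each group separated by a blank line and sorted alphabetically. The new header of this file follows that pattern exactly:

    # 1. standard library
    import locale

    # 2. third-party packages
    from jinja2.utils import generate_lorem_ipsum

    # 3. first-party packages, alphabetical by module and then by name
    from pelican.contents import Article, Author
    from pelican.paginator import Paginator
    from pelican.settings import DEFAULT_CONFIG
    from pelican.tests.support import get_settings, unittest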
diff --git a/pelican/tests/test_pelican.py b/pelican/tests/test_pelican.py
@@ -1,23 +1,25 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from __future__ import print_function, unicode_literals

import collections
import os
import sys
from tempfile import mkdtemp
from shutil import rmtree
import locale
import logging
import os
import subprocess
import sys

from shutil import rmtree
from tempfile import mkdtemp

from pelican import Pelican
from pelican.generators import StaticGenerator
from pelican.settings import read_settings
from pelican.tests.support import LoggedTestCase, mute, locale_available, unittest
from pelican.tests.support import (LoggedTestCase, locale_available,
mute, unittest)

CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
SAMPLES_PATH = os.path.abspath(os.path.join(
CURRENT_DIR, os.pardir, os.pardir, 'samples'))
CURRENT_DIR, os.pardir, os.pardir, 'samples'))
OUTPUT_PATH = os.path.abspath(os.path.join(CURRENT_DIR, 'output'))

INPUT_PATH = os.path.join(SAMPLES_PATH, "content")

@@ -27,13 +29,10 @@ SAMPLE_FR_CONFIG = os.path.join(SAMPLES_PATH, "pelican.conf_FR.py")

def recursiveDiff(dcmp):
diff = {
'diff_files': [os.path.join(dcmp.right, f)
for f in dcmp.diff_files],
'left_only': [os.path.join(dcmp.right, f)
for f in dcmp.left_only],
'right_only': [os.path.join(dcmp.right, f)
for f in dcmp.right_only],
}
'diff_files': [os.path.join(dcmp.right, f) for f in dcmp.diff_files],
'left_only': [os.path.join(dcmp.right, f) for f in dcmp.left_only],
'right_only': [os.path.join(dcmp.right, f) for f in dcmp.right_only],
}
for sub_dcmp in dcmp.subdirs.values():
for k, v in recursiveDiff(sub_dcmp).items():
diff[k] += v

@@ -60,9 +59,13 @@ class TestPelican(LoggedTestCase):

def assertDirsEqual(self, left_path, right_path):
out, err = subprocess.Popen(
['git', 'diff', '--no-ext-diff', '--exit-code', '-w', left_path, right_path],
env={str('PAGER'): str('')}, stdout=subprocess.PIPE, stderr=subprocess.PIPE
['git', 'diff', '--no-ext-diff', '--exit-code',
'-w', left_path, right_path],
env={str('PAGER'): str('')},
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate()

def ignorable_git_crlf_errors(line):
# Work around for running tests on Windows
for msg in [

@@ -85,9 +88,11 @@ class TestPelican(LoggedTestCase):
pelican = Pelican(settings=read_settings(path=None))
generator_classes = pelican.get_generator_classes()

self.assertTrue(generator_classes[-1] is StaticGenerator,
self.assertTrue(
generator_classes[-1] is StaticGenerator,
"StaticGenerator must be the last generator, but it isn't!")
self.assertIsInstance(generator_classes, collections.Sequence,
self.assertIsInstance(
generator_classes, collections.Sequence,
"get_generator_classes() must return a Sequence to preserve order")

def test_basic_generation_works(self):

@@ -98,10 +103,11 @@ class TestPelican(LoggedTestCase):
'OUTPUT_PATH': self.temp_path,
'CACHE_PATH': self.temp_cache,
'LOCALE': locale.normalize('en_US'),
})
})
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, 'basic'))
self.assertDirsEqual(
self.temp_path, os.path.join(OUTPUT_PATH, 'basic'))
self.assertLogCountEqual(
count=3,
msg="Unable to find.*skipping url replacement",

@@ -114,10 +120,11 @@ class TestPelican(LoggedTestCase):
'OUTPUT_PATH': self.temp_path,
'CACHE_PATH': self.temp_cache,
'LOCALE': locale.normalize('en_US'),
})
})
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, 'custom'))
self.assertDirsEqual(
self.temp_path, os.path.join(OUTPUT_PATH, 'custom'))

@unittest.skipUnless(locale_available('fr_FR.UTF-8') or
locale_available('French'), 'French locale needed')

@@ -133,10 +140,11 @@ class TestPelican(LoggedTestCase):
'OUTPUT_PATH': self.temp_path,
'CACHE_PATH': self.temp_cache,
'LOCALE': our_locale,
})
})
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, 'custom_locale'))
self.assertDirsEqual(
self.temp_path, os.path.join(OUTPUT_PATH, 'custom_locale'))

def test_theme_static_paths_copy(self):
# the same thing with a specified set of settings should work

@@ -146,8 +154,9 @@ class TestPelican(LoggedTestCase):
'CACHE_PATH': self.temp_cache,
'THEME_STATIC_PATHS': [os.path.join(SAMPLES_PATH, 'very'),
os.path.join(SAMPLES_PATH, 'kinda'),
os.path.join(SAMPLES_PATH, 'theme_standard')]
})
os.path.join(SAMPLES_PATH,
'theme_standard')]
})
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
theme_output = os.path.join(self.temp_path, 'theme')

@@ -165,8 +174,9 @@ class TestPelican(LoggedTestCase):
'PATH': INPUT_PATH,
'OUTPUT_PATH': self.temp_path,
'CACHE_PATH': self.temp_cache,
'THEME_STATIC_PATHS': [os.path.join(SAMPLES_PATH, 'theme_standard')]
})
'THEME_STATIC_PATHS': [os.path.join(SAMPLES_PATH,
'theme_standard')]
})

pelican = Pelican(settings=settings)
mute(True)(pelican.run)()

@@ -184,9 +194,9 @@ class TestPelican(LoggedTestCase):
'WRITE_SELECTED': [
os.path.join(self.temp_path, 'oh-yeah.html'),
os.path.join(self.temp_path, 'categories.html'),
],
],
'LOCALE': locale.normalize('en_US'),
})
})
pelican = Pelican(settings=settings)
logger = logging.getLogger()
orig_level = logger.getEffectiveLevel()
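To check a tree against these rules locally, the pep8 checker (later renamed pycodestyle; flake8 wraps it together with pyflakes) exposes a small API. A hedged sketch, assuming pycodestyle is installed and using one of the files above as an example path:

    import pycodestyle

    # Build a style guide and check one file; quiet=True suppresses the
    # per-line output, and report.total_errors counts violations found.
    style = pycodestyle.StyleGuide(quiet=True)
    report = style.check_files(['pelican/tests/test_pelican.py'])
    print('violations:', report.total_errors)

In practice running the flake8 command line over the package, as the new tox environment does, is the usual workflow; the API form is handy for custom scripts.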
diff --git a/pelican/tests/test_readers.py b/pelican/tests/test_readers.py
@@ -1,11 +1,12 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from __future__ import print_function, unicode_literals

import os

from pelican import readers
from pelican.tests.support import get_settings, unittest
from pelican.utils import SafeDatetime
from pelican.tests.support import unittest, get_settings


CUR_DIR = os.path.dirname(__file__)
CONTENT_PATH = os.path.join(CUR_DIR, 'content')

@@ -29,22 +30,26 @@ class ReaderTest(unittest.TestCase):
self.assertEqual(
value,
real_value,
'Expected %s to have value %s, but was %s' % (key, value, real_value))
'Expected %s to have value %s, but was %s' %
(key, value, real_value))
else:
self.fail(
'Expected %s to have value %s, but was not in Dict' % (key, value))
'Expected %s to have value %s, but was not in Dict' %
(key, value))


class TestAssertDictHasSubset(ReaderTest):
def setUp(self):
self.dictionary = {
'key-a' : 'val-a',
'key-b' : 'val-b'}
'key-a': 'val-a',
'key-b': 'val-b'
}

def tearDown(self):
self.dictionary = None

def test_subset(self):
self.assertDictHasSubset(self.dictionary, {'key-a':'val-a'})
self.assertDictHasSubset(self.dictionary, {'key-a': 'val-a'})

def test_equal(self):
self.assertDictHasSubset(self.dictionary, self.dictionary)

@@ -54,18 +59,17 @@ class TestAssertDictHasSubset(ReaderTest):
AssertionError,
'Expected.*key-c.*to have value.*val-c.*but was not in Dict',
self.assertDictHasSubset,
self.dictionary,
{'key-c':'val-c'}
)
self.dictionary,
{'key-c': 'val-c'})

def test_fail_wrong_val(self):
self.assertRaisesRegexp(
AssertionError,
'Expected .*key-a.* to have value .*val-b.* but was .*val-a.*',
self.assertDictHasSubset,
self.dictionary,
{'key-a':'val-b'}
)
self.dictionary,
{'key-a': 'val-b'})


class DefaultReaderTest(ReaderTest):


@@ -153,17 +157,17 @@ class RstReaderTest(ReaderTest):
'(?P<date>\d{4}-\d{2}-\d{2})'
'_(?P<Slug>.*)'
'#(?P<MyMeta>.*)-(?P<author>.*)'
),
),
EXTRA_PATH_METADATA={
input_with_metadata: {
'key-1a': 'value-1a',
'key-1b': 'value-1b'
}
}
)
}
)
expected_metadata = {
'category': 'yeah',
'author' : 'Alexis Métaireau',
'author': 'Alexis Métaireau',
'title': 'Rst with filename metadata',
'date': SafeDatetime(2012, 11, 29),
'slug': 'rst_w_filename_meta',

@@ -179,38 +183,41 @@ class RstReaderTest(ReaderTest):
path=input_file_path_without_metadata,
EXTRA_PATH_METADATA={
input_file_path_without_metadata: {
'author': 'Charlès Overwrite'}
'author': 'Charlès Overwrite'
}
)
}
)
expected_without_metadata = {
'category' : 'misc',
'author' : 'Charlès Overwrite',
'title' : 'Article title',
'reader' : 'rst',
'category': 'misc',
'author': 'Charlès Overwrite',
'title': 'Article title',
'reader': 'rst',
}
self.assertDictHasSubset(
page_without_metadata.metadata,
expected_without_metadata)

def test_article_extra_path_metadata_dont_overwrite(self):
#EXTRA_PATH_METADATA['author'] should get ignored
#since we don't overwrite already set values
# EXTRA_PATH_METADATA['author'] should get ignored
# since we don't overwrite already set values
input_file_path = '2012-11-29_rst_w_filename_meta#foo-bar.rst'
page = self.read_file(
path=input_file_path,
FILENAME_METADATA=(
'(?P<date>\d{4}-\d{2}-\d{2})'
'_(?P<Slug>.*)'
'#(?P<MyMeta>.*)-(?P<orginalauthor>.*)'),
'#(?P<MyMeta>.*)-(?P<orginalauthor>.*)'
),
EXTRA_PATH_METADATA={
input_file_path: {
'author': 'Charlès Overwrite',
'key-1b': 'value-1b'}
'key-1b': 'value-1b'
}
)
}
)
expected = {
'category': 'yeah',
'author' : 'Alexis Métaireau',
'author': 'Alexis Métaireau',
'title': 'Rst with filename metadata',
'date': SafeDatetime(2012, 11, 29),
'slug': 'rst_w_filename_meta',

@@ -273,7 +280,7 @@ class RstReaderTest(ReaderTest):
# typogrify should be able to ignore user specified tags,
# but tries to be clever with widont extension
page = self.read_file(path='article.rst', TYPOGRIFY=True,
TYPOGRIFY_IGNORE_TAGS = ['p'])
TYPOGRIFY_IGNORE_TAGS=['p'])
expected = ('<p>THIS is some content. With some stuff to '
'&#8220;typogrify&#8221;...</p>\n<p>Now with added '
'support for <abbr title="three letter acronym">'

@@ -284,7 +291,7 @@ class RstReaderTest(ReaderTest):
# typogrify should ignore code blocks by default because
# code blocks are composed inside the pre tag
page = self.read_file(path='article_with_code_block.rst',
TYPOGRIFY=True)
TYPOGRIFY=True)

expected = ('<p>An article with some code</p>\n'
'<div class="highlight"><pre><span class="n">x</span>'

@@ -292,13 +299,17 @@ class RstReaderTest(ReaderTest):
' <span class="n">y</span>\n</pre></div>\n'
'<p>A block quote:</p>\n<blockquote>\nx '
'<span class="amp">&amp;</span> y</blockquote>\n'
'<p>Normal:\nx <span class="amp">&amp;</span> y</p>\n')
'<p>Normal:\nx'
' <span class="amp">&amp;</span>'
' y'
'</p>\n')

self.assertEqual(page.content, expected)

# instruct typogrify to also ignore blockquotes
page = self.read_file(path='article_with_code_block.rst',
TYPOGRIFY=True, TYPOGRIFY_IGNORE_TAGS = ['blockquote'])
TYPOGRIFY=True,
TYPOGRIFY_IGNORE_TAGS=['blockquote'])

expected = ('<p>An article with some code</p>\n'
'<div class="highlight"><pre><span class="n">x</span>'

@@ -306,7 +317,10 @@ class RstReaderTest(ReaderTest):
' <span class="n">y</span>\n</pre></div>\n'
'<p>A block quote:</p>\n<blockquote>\nx '
'&amp; y</blockquote>\n'
'<p>Normal:\nx <span class="amp">&amp;</span> y</p>\n')
'<p>Normal:\nx'
' <span class="amp">&amp;</span>'
' y'
'</p>\n')

self.assertEqual(page.content, expected)
except ImportError:

@@ -339,6 +353,7 @@ class RstReaderTest(ReaderTest):

self.assertDictHasSubset(page.metadata, expected)


@unittest.skipUnless(readers.Markdown, "markdown isn't installed")
class MdReaderTest(ReaderTest):


@@ -400,7 +415,8 @@ class MdReaderTest(ReaderTest):
'modified': SafeDatetime(2012, 11, 1),
'multiline': [
'Line Metadata should be handle properly.',
'See syntax of Meta-Data extension of Python Markdown package:',
'See syntax of Meta-Data extension of '
'Python Markdown package:',
'If a line is indented by 4 or more spaces,',
'that line is assumed to be an additional line of the value',
'for the previous keyword.',
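The dictionary and keyword edits above are pure whitespace fixes: E203 forbids a space before a colon, E231 requires one after it, and E251 forbids spaces around `=` in keyword arguments. Condensed into a self-contained sketch (read_file here is a stand-in, not the real helper):

    def read_file(path, **settings):    # illustrative stand-in
        return path, settings

    # E203/E231: no space before a dict colon, exactly one after it.
    dictionary = {'key-a': 'val-a', 'key-b': 'val-b'}

    # E251: no spaces around '=' when passing keyword arguments.
    page = read_file('article.rst', TYPOGRIFY=True,
                     TYPOGRIFY_IGNORE_TAGS=['p'])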
diff --git a/pelican/tests/test_rstdirectives.py b/pelican/tests/test_rstdirectives.py
@@ -1,5 +1,8 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from __future__ import print_function, unicode_literals

from pelican.tests.support import unittest

try:
from unittest.mock import Mock
except ImportError:

@@ -7,7 +10,7 @@ except ImportError:
from mock import Mock
except ImportError:
Mock = False
from pelican.tests.support import unittest


@unittest.skipUnless(Mock, 'Needs Mock module')
class Test_abbr_role(unittest.TestCase):
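The pattern being tidied here keeps the test module importable whether mock ships with the standard library (Python 3.3+) or as a separate package, falling back to a false-y sentinel that skipUnless can check. Isolated, it looks like this:

    try:
        from unittest.mock import Mock       # Python 3.3+
    except ImportError:
        try:
            from mock import Mock            # third-party backport
        except ImportError:
            Mock = False                     # sentinel for skipUnless

    # Tests are then skipped cleanly when no mock library is available:
    # @unittest.skipUnless(Mock, 'Needs Mock module')

The PEP8 change is only to move the plain `from pelican.tests.support import unittest` line up into the regular import block instead of leaving it stranded after the try/except.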
@ -1,13 +1,15 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import copy
import os
import locale
from sys import platform
from os.path import dirname, abspath, join
from __future__ import print_function, unicode_literals

from pelican.settings import (read_settings, configure_settings,
                              DEFAULT_CONFIG, DEFAULT_THEME)
import copy
import locale
import os
from os.path import abspath, dirname, join
from sys import platform


from pelican.settings import (DEFAULT_CONFIG, DEFAULT_THEME,
                              configure_settings, read_settings)
from pelican.tests.support import unittest

@ -28,12 +30,14 @@ class TestSettingsConfiguration(unittest.TestCase):

    def test_overwrite_existing_settings(self):
        self.assertEqual(self.settings.get('SITENAME'), "Alexis' log")
        self.assertEqual(self.settings.get('SITEURL'),
                         'http://blog.notmyidea.org')
        self.assertEqual(
            self.settings.get('SITEURL'),
            'http://blog.notmyidea.org')

    def test_keep_default_settings(self):
        # Keep default settings if not defined.
        self.assertEqual(self.settings.get('DEFAULT_CATEGORY'),
        self.assertEqual(
            self.settings.get('DEFAULT_CATEGORY'),
            DEFAULT_CONFIG['DEFAULT_CATEGORY'])

    def test_dont_copy_small_keys(self):

@ -69,28 +73,31 @@ class TestSettingsConfiguration(unittest.TestCase):

    def test_static_path_settings_safety(self):
        # Disallow static paths from being strings
        settings = {'STATIC_PATHS': 'foo/bar',
                    'THEME_STATIC_PATHS': 'bar/baz',
                    # These 4 settings are required to run configure_settings
                    'PATH': '.',
                    'THEME': DEFAULT_THEME,
                    'SITEURL': 'http://blog.notmyidea.org/',
                    'LOCALE': '',
                    }
        settings = {
            'STATIC_PATHS': 'foo/bar',
            'THEME_STATIC_PATHS': 'bar/baz',
            # These 4 settings are required to run configure_settings
            'PATH': '.',
            'THEME': DEFAULT_THEME,
            'SITEURL': 'http://blog.notmyidea.org/',
            'LOCALE': '',
        }
        configure_settings(settings)
        self.assertEqual(settings['STATIC_PATHS'],
                         DEFAULT_CONFIG['STATIC_PATHS'])
        self.assertEqual(settings['THEME_STATIC_PATHS'],
                         DEFAULT_CONFIG['THEME_STATIC_PATHS'])
        self.assertEqual(
            settings['STATIC_PATHS'],
            DEFAULT_CONFIG['STATIC_PATHS'])
        self.assertEqual(
            settings['THEME_STATIC_PATHS'],
            DEFAULT_CONFIG['THEME_STATIC_PATHS'])

    def test_configure_settings(self):
        # Manipulations to settings should be applied correctly.
        settings = {
                'SITEURL': 'http://blog.notmyidea.org/',
                'LOCALE': '',
                'PATH': os.curdir,
                'THEME': DEFAULT_THEME,
                }
            'SITEURL': 'http://blog.notmyidea.org/',
            'LOCALE': '',
            'PATH': os.curdir,
            'THEME': DEFAULT_THEME,
        }
        configure_settings(settings)

        # SITEURL should not have a trailing slash

@ -154,7 +161,7 @@ class TestSettingsConfiguration(unittest.TestCase):
        settings['PATH'] = ''
        self.assertRaises(Exception, configure_settings, settings)

        # Test nonexistent THEME
        # Test nonexistent THEME
        settings['PATH'] = os.curdir
        settings['THEME'] = 'foo'

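The safety test above pins down useful behaviour beyond the reflowing: configure_settings replaces list-typed settings that were mistakenly supplied as strings with their defaults, rather than letting a string be iterated character by character later on. A condensed sketch of the same call, reusing only keys and values from the test itself:

    from pelican.settings import (DEFAULT_CONFIG, DEFAULT_THEME,
                                  configure_settings)

    settings = {
        'STATIC_PATHS': 'foo/bar',        # wrong: should be a list of paths
        'THEME_STATIC_PATHS': 'bar/baz',  # wrong: should be a list of paths
        # These 4 settings are required to run configure_settings
        'PATH': '.',
        'THEME': DEFAULT_THEME,
        'SITEURL': 'http://blog.notmyidea.org/',
        'LOCALE': '',
    }
    configure_settings(settings)

    # the string values are discarded in favour of the list-typed defaults
    assert settings['STATIC_PATHS'] == DEFAULT_CONFIG['STATIC_PATHS']
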
@ -1,8 +1,9 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from pelican.urlwrappers import URLWrapper, Tag, Category
from pelican.tests.support import unittest
from pelican.urlwrappers import Category, Tag, URLWrapper


class TestURLWrapper(unittest.TestCase):
    def test_ordering(self):

@ -1,20 +1,22 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, absolute_import
import logging
import shutil
import os
import time
from __future__ import absolute_import, print_function, unicode_literals

import locale
import logging
import os
import shutil
import time
from sys import platform
from tempfile import mkdtemp

import pytz

from pelican.generators import TemplatePagesGenerator
from pelican.writers import Writer
from pelican.settings import read_settings
from pelican import utils
from pelican.tests.support import get_article, LoggedTestCase, locale_available, unittest
from pelican.generators import TemplatePagesGenerator
from pelican.settings import read_settings
from pelican.tests.support import (LoggedTestCase, get_article,
                                   locale_available, unittest)
from pelican.writers import Writer


class TestUtils(LoggedTestCase):

@ -72,7 +74,7 @@ class TestUtils(LoggedTestCase):
            '2012-11-22T22:11:10Z': date_hour_sec_z,
            '2012-11-22T22:11:10-0500': date_hour_sec_est,
            '2012-11-22T22:11:10.123Z': date_hour_sec_frac_z,
            }
        }

        # examples from http://www.w3.org/TR/NOTE-datetime
        iso_8601_date = utils.SafeDatetime(year=1997, month=7, day=16)

@ -95,7 +97,6 @@ class TestUtils(LoggedTestCase):
        # invalid ones
        invalid_dates = ['2010-110-12', 'yay']


        for value, expected in dates.items():
            self.assertEqual(utils.get_date(value), expected, value)

@ -290,7 +291,9 @@ class TestUtils(LoggedTestCase):
        self.assertEqual(utils.strftime(d, '%d/%m/%Y'), '29/08/2012')

        # RFC 3339
        self.assertEqual(utils.strftime(d, '%Y-%m-%dT%H:%M:%SZ'),'2012-08-29T00:00:00Z')
        self.assertEqual(
            utils.strftime(d, '%Y-%m-%dT%H:%M:%SZ'),
            '2012-08-29T00:00:00Z')

        # % escaped
        self.assertEqual(utils.strftime(d, '%d%%%m%%%y'), '29%08%12')

@ -306,8 +309,9 @@ class TestUtils(LoggedTestCase):
                         'Published in 29-08-2012')

        # with non-ascii text
        self.assertEqual(utils.strftime(d, '%d/%m/%Y Øl trinken beim Besäufnis'),
                         '29/08/2012 Øl trinken beim Besäufnis')
        self.assertEqual(
            utils.strftime(d, '%d/%m/%Y Øl trinken beim Besäufnis'),
            '29/08/2012 Øl trinken beim Besäufnis')

        # alternative formatting options
        self.assertEqual(utils.strftime(d, '%-d/%-m/%y'), '29/8/12')

@ -316,7 +320,6 @@ class TestUtils(LoggedTestCase):
        d = utils.SafeDatetime(2012, 8, 9)
        self.assertEqual(utils.strftime(d, '%-d/%-m/%y'), '9/8/12')


    # test the output of utils.strftime in a different locale
    # Turkish locale
    @unittest.skipUnless(locale_available('tr_TR.UTF-8') or

@ -339,17 +342,18 @@ class TestUtils(LoggedTestCase):
                         'Çarşamba, 29 Ağustos 2012')

        # with text
        self.assertEqual(utils.strftime(d, 'Yayınlanma tarihi: %A, %d %B %Y'),
        self.assertEqual(
            utils.strftime(d, 'Yayınlanma tarihi: %A, %d %B %Y'),
            'Yayınlanma tarihi: Çarşamba, 29 Ağustos 2012')

        # non-ascii format candidate (someone might pass it... for some reason)
        self.assertEqual(utils.strftime(d, '%Y yılında %üretim artışı'),
        self.assertEqual(
            utils.strftime(d, '%Y yılında %üretim artışı'),
            '2012 yılında %üretim artışı')

        # restore locale back
        locale.setlocale(locale.LC_ALL, old_locale)

    # test the output of utils.strftime in a different locale
    # French locale
    @unittest.skipUnless(locale_available('fr_FR.UTF-8') or

@ -373,21 +377,28 @@ class TestUtils(LoggedTestCase):
        self.assertTrue(utils.strftime(d, '%A') in ('mercredi', 'Mercredi'))

        # with text
        self.assertEqual(utils.strftime(d, 'Écrit le %d %B %Y'),
        self.assertEqual(
            utils.strftime(d, 'Écrit le %d %B %Y'),
            'Écrit le 29 août 2012')

        # non-ascii format candidate (someone might pass it... for some reason)
        self.assertEqual(utils.strftime(d, '%écrits en %Y'),
        self.assertEqual(
            utils.strftime(d, '%écrits en %Y'),
            '%écrits en 2012')

        # restore locale back
        locale.setlocale(locale.LC_ALL, old_locale)

    def test_maybe_pluralize(self):
        self.assertEqual(utils.maybe_pluralize(0, 'Article', 'Articles'), '0 Articles')
        self.assertEqual(utils.maybe_pluralize(1, 'Article', 'Articles'), '1 Article')
        self.assertEqual(utils.maybe_pluralize(2, 'Article', 'Articles'), '2 Articles')
        self.assertEqual(
            utils.maybe_pluralize(0, 'Article', 'Articles'),
            '0 Articles')
        self.assertEqual(
            utils.maybe_pluralize(1, 'Article', 'Articles'),
            '1 Article')
        self.assertEqual(
            utils.maybe_pluralize(2, 'Article', 'Articles'),
            '2 Articles')


class TestCopy(unittest.TestCase):

@ -435,8 +446,9 @@ class TestCopy(unittest.TestCase):

    def test_copy_file_create_dirs(self):
        self._create_file('a.txt')
        utils.copy(os.path.join(self.root_dir, 'a.txt'),
                   os.path.join(self.root_dir, 'b0', 'b1', 'b2', 'b3', 'b.txt'))
        utils.copy(
            os.path.join(self.root_dir, 'a.txt'),
            os.path.join(self.root_dir, 'b0', 'b1', 'b2', 'b3', 'b.txt'))
        self._exist_dir('b0')
        self._exist_dir('b0', 'b1')
        self._exist_dir('b0', 'b1', 'b2')

@ -491,35 +503,39 @@ class TestDateFormatter(unittest.TestCase):
            template_file.write('date = {{ date|strftime("%A, %d %B %Y") }}')
        self.date = utils.SafeDatetime(2012, 8, 29)

    def tearDown(self):
        shutil.rmtree(self.temp_content)
        shutil.rmtree(self.temp_output)
        # reset locale to default
        locale.setlocale(locale.LC_ALL, '')

    @unittest.skipUnless(locale_available('fr_FR.UTF-8') or
                         locale_available('French'),
                         'French locale needed')
    def test_french_strftime(self):
        # This test tries to reproduce an issue that occurred with python3.3 under macos10 only
        # This test tries to reproduce an issue that
        # occurred with python3.3 under macos10 only
        if platform == 'win32':
            locale.setlocale(locale.LC_ALL, str('French'))
        else:
            locale.setlocale(locale.LC_ALL, str('fr_FR.UTF-8'))
        date = utils.SafeDatetime(2014,8,14)
        # we compare the lower() dates since macos10 returns "Jeudi" for %A whereas linux reports "jeudi"
        self.assertEqual( u'jeudi, 14 août 2014', utils.strftime(date, date_format="%A, %d %B %Y").lower() )
        date = utils.SafeDatetime(2014, 8, 14)
        # we compare the lower() dates since macos10 returns
        # "Jeudi" for %A whereas linux reports "jeudi"
        self.assertEqual(
            u'jeudi, 14 août 2014',
            utils.strftime(date, date_format="%A, %d %B %Y").lower())
        df = utils.DateFormatter()
        self.assertEqual( u'jeudi, 14 août 2014', df(date, date_format="%A, %d %B %Y").lower() )
        self.assertEqual(
            u'jeudi, 14 août 2014',
            df(date, date_format="%A, %d %B %Y").lower())
        # Let us now set the global locale to C:
        locale.setlocale(locale.LC_ALL, str('C'))
        # DateFormatter should still work as expected since it is the whole point of DateFormatter
        # DateFormatter should still work as expected
        # since it is the whole point of DateFormatter
        # (This is where pre-2014/4/15 code fails on macos10)
        df_date = df(date, date_format="%A, %d %B %Y").lower()
        self.assertEqual( u'jeudi, 14 août 2014', df_date )

        self.assertEqual(u'jeudi, 14 août 2014', df_date)

    @unittest.skipUnless(locale_available('fr_FR.UTF-8') or
                         locale_available('French'),

@ -530,9 +546,12 @@ class TestDateFormatter(unittest.TestCase):
        else:
            locale_string = 'fr_FR.UTF-8'
        settings = read_settings(
            override = {'LOCALE': locale_string,
                        'TEMPLATE_PAGES': {'template/source.html':
                                           'generated/file.html'}})
            override={
                'LOCALE': locale_string,
                'TEMPLATE_PAGES': {
                    'template/source.html': 'generated/file.html'
                }
            })

        generator = TemplatePagesGenerator(
            {'date': self.date}, settings,

@ -543,7 +562,7 @@ class TestDateFormatter(unittest.TestCase):
        generator.generate_output(writer)

        output_path = os.path.join(
                self.temp_output, 'generated', 'file.html')
            self.temp_output, 'generated', 'file.html')

        # output file has been generated
        self.assertTrue(os.path.exists(output_path))

@ -553,7 +572,6 @@ class TestDateFormatter(unittest.TestCase):
        self.assertEqual(output_file,
                         utils.strftime(self.date, 'date = %A, %d %B %Y'))


    @unittest.skipUnless(locale_available('tr_TR.UTF-8') or
                         locale_available('Turkish'),
                         'Turkish locale needed')

@ -563,9 +581,12 @@ class TestDateFormatter(unittest.TestCase):
        else:
            locale_string = 'tr_TR.UTF-8'
        settings = read_settings(
            override = {'LOCALE': locale_string,
                        'TEMPLATE_PAGES': {'template/source.html':
                                           'generated/file.html'}})
            override={
                'LOCALE': locale_string,
                'TEMPLATE_PAGES': {
                    'template/source.html': 'generated/file.html'
                }
            })

        generator = TemplatePagesGenerator(
            {'date': self.date}, settings,

@ -576,7 +597,7 @@ class TestDateFormatter(unittest.TestCase):
        generator.generate_output(writer)

        output_path = os.path.join(
                self.temp_output, 'generated', 'file.html')
            self.temp_output, 'generated', 'file.html')

        # output file has been generated
        self.assertTrue(os.path.exists(output_path))

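Beyond the reflowing, the tests above document the contract of utils.strftime and utils.DateFormatter: both must format non-ASCII date strings, and DateFormatter must keep using the locale that was active when it was created even if the process-wide locale changes afterwards. A condensed sketch of that behaviour, assuming the fr_FR.UTF-8 locale is installed (lower() smooths over the "Jeudi"/"jeudi" platform difference the test notes):

    import locale

    from pelican import utils

    locale.setlocale(locale.LC_ALL, str('fr_FR.UTF-8'))
    date = utils.SafeDatetime(2014, 8, 14)

    assert utils.strftime(
        date, date_format="%A, %d %B %Y").lower() == u'jeudi, 14 août 2014'

    # DateFormatter snapshots the current locale, so it still formats in
    # French after the global locale is reset to C.
    df = utils.DateFormatter()
    locale.setlocale(locale.LC_ALL, str('C'))
    assert df(date, date_format="%A, %d %B %Y").lower() == \
        u'jeudi, 14 août 2014'
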
@ -1,29 +1,30 @@
#!/usr/bin/env python

# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from __future__ import print_function, unicode_literals

import argparse
try:
    from html import unescape  # py3.4+
except ImportError:
    from six.moves.html_parser import HTMLParser
    unescape = HTMLParser().unescape
import logging
import os
import re
import subprocess
import sys
import time
import logging

from codecs import open

from six.moves.urllib.error import URLError
from six.moves.urllib.parse import urlparse
from six.moves.urllib.request import urlretrieve

# pelican.log has to be the first pelican module to be loaded
# because logging.setLoggerClass has to be called before logging.getLogger
from pelican.log import init
from pelican.utils import slugify, SafeDatetime
from pelican.utils import SafeDatetime, slugify

try:
    from html import unescape  # py3.4+
except ImportError:
    from six.moves.html_parser import HTMLParser
    unescape = HTMLParser().unescape

logger = logging.getLogger(__name__)

@ -70,12 +71,19 @@ def decode_wp_content(content, br=True):
        content = ""
        for p in pgraphs:
            content = content + "<p>" + p.strip() + "</p>\n"
    # under certain strange conditions it could create a P of entirely whitespace
    # under certain strange conditions it could create
    # a P of entirely whitespace
    content = re.sub(r'<p>\s*</p>', '', content)
    content = re.sub(r'<p>([^<]+)</(div|address|form)>', "<p>\\1</p></\\2>", content)
    content = re.sub(
        r'<p>([^<]+)</(div|address|form)>',
        "<p>\\1</p></\\2>",
        content)
    # don't wrap tags
    content = re.sub(r'<p>\s*(</?' + allblocks + r'[^>]*>)\s*</p>', "\\1", content)
    #problem with nested lists
    content = re.sub(
        r'<p>\s*(</?' + allblocks + r'[^>]*>)\s*</p>',
        "\\1",
        content)
    # problem with nested lists
    content = re.sub(r'<p>(<li.*)</p>', "\\1", content)
    content = re.sub(r'<p><blockquote([^>]*)>', "<blockquote\\1><p>", content)
    content = content.replace('</blockquote></p>', '</p></blockquote>')

@ -84,12 +92,20 @@ def decode_wp_content(content, br=True):
    if br:
        def _preserve_newline(match):
            return match.group(0).replace("\n", "<WPPreserveNewline />")
        content = re.sub(r'/<(script|style).*?<\/\\1>/s', _preserve_newline, content)
        content = re.sub(
            r'/<(script|style).*?<\/\\1>/s',
            _preserve_newline,
            content)
        # optionally make line breaks
        content = re.sub(r'(?<!<br />)\s*\n', "<br />\n", content)
        content = content.replace("<WPPreserveNewline />", "\n")
        content = re.sub(r'(</?' + allblocks + r'[^>]*>)\s*<br />', "\\1", content)
        content = re.sub(r'<br />(\s*</?(?:p|li|div|dl|dd|dt|th|pre|td|ul|ol)[^>]*>)', '\\1', content)
        content = re.sub(
            r'(</?' + allblocks + r'[^>]*>)\s*<br />', "\\1",
            content)
        content = re.sub(
            r'<br />(\s*</?(?:p|li|div|dl|dd|dt|th|pre|td|ul|ol)[^>]*>)',
            '\\1',
            content)
        content = re.sub(r'\n</p>', "</p>", content)

    if pre_tags:

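The decode_wp_content hunks above only rewrap long re.sub calls, but the logic is worth spelling out: after WordPress content is naively split into <p> blocks, follow-up substitutions delete whitespace-only paragraphs and unwrap block-level tags that may not live inside <p>. A small self-contained illustration of those two cleanups, with the allblocks alternation abbreviated to three tags (the real one is much longer):

    import re

    allblocks = '(?:div|address|form)'
    content = "<p>   </p><p><div class='x'></p>text<p></div></p>"

    # a P of entirely whitespace is dropped
    content = re.sub(r'<p>\s*</p>', '', content)

    # block-level tags are unwrapped rather than left inside <p>...</p>
    content = re.sub(
        r'<p>\s*(</?' + allblocks + r'[^>]*>)\s*</p>',
        "\\1",
        content)

    assert content == "<div class='x'>text</div>"
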
@ -100,13 +116,14 @@ def decode_wp_content(content, br=True):

    return content


def get_items(xml):
    """Opens a WordPress xml file and returns a list of items"""
    try:
        from bs4 import BeautifulSoup
    except ImportError:
        error = ('Missing dependency '
                 '"BeautifulSoup4" and "lxml" required to import WordPress XML files.')
        error = ('Missing dependency "BeautifulSoup4" and "lxml" required to '
                 'import WordPress XML files.')
        sys.exit(error)
    with open(xml, encoding='utf-8') as infile:
        xmlfile = infile.read()

@ -114,12 +131,14 @@ def get_items(xml):
    items = soup.rss.channel.findAll('item')
    return items


def get_filename(filename, post_id):
    if filename is not None:
        return filename
    else:
        return post_id


def wp2fields(xml, wp_custpost=False):
    """Opens a wordpress XML file, and yield Pelican fields"""

@ -141,16 +160,18 @@ def wp2fields(xml, wp_custpost=False):

            content = item.find('encoded').string
            raw_date = item.find('post_date').string
            date_object = time.strptime(raw_date, "%Y-%m-%d %H:%M:%S")
            date = time.strftime("%Y-%m-%d %H:%M", date_object)
            date_object = time.strptime(raw_date, '%Y-%m-%d %H:%M:%S')
            date = time.strftime('%Y-%m-%d %H:%M', date_object)
            author = item.find('creator').string

            categories = [cat.string for cat in item.findAll('category', {'domain' : 'category'})]
            # caturl = [cat['nicename'] for cat in item.find(domain='category')]
            categories = [cat.string for cat
                          in item.findAll('category', {'domain': 'category'})]

            tags = [tag.string for tag in item.findAll('category', {'domain' : 'post_tag'})]
            tags = [tag.string for tag
                    in item.findAll('category', {'domain': 'post_tag'})]
            # To publish a post the status should be 'published'
            status = 'published' if item.find('status').string == "publish" else item.find('status').string
            status = 'published' if item.find('status').string == "publish" \
                else item.find('status').string

            kind = 'article'
            post_type = item.find('post_type').string

@ -159,16 +180,17 @@ def wp2fields(xml, wp_custpost=False):
            elif wp_custpost:
                if post_type == 'post':
                    pass
                # Old behaviour was to name everything not a page as an article.
                # Theoretically all attachments have status == inherit so
                # no attachments should be here. But this statement is to
                # Old behaviour was to name everything not a page as an
                # article.Theoretically all attachments have status == inherit
                # so no attachments should be here. But this statement is to
                # maintain existing behaviour in case that doesn't hold true.
                elif post_type == 'attachment':
                    pass
                else:
                    kind = post_type
            yield (title, content, filename, date, author, categories, tags, status,
                   kind, "wp-html")
            yield (title, content, filename, date, author, categories,
                   tags, status, kind, 'wp-html')


def dc2fields(file):
    """Opens a Dotclear export file, and yield pelican fields"""

@ -176,10 +198,10 @@ def dc2fields(file):
        from bs4 import BeautifulSoup
    except ImportError:
        error = ('Missing dependency '
                 '"BeautifulSoup4" and "lxml" required to import Dotclear files.')
                 '"BeautifulSoup4" and "lxml" required '
                 'to import Dotclear files.')
        sys.exit(error)

    in_cat = False
    in_post = False
    category_list = {}

@ -203,7 +225,7 @@ def dc2fields(file):
                    # remove 1st and last ""
                    fields[0] = fields[0][1:]
                    # fields[-1] = fields[-1][:-1]
                    category_list[fields[0]]=fields[2]
                    category_list[fields[0]] = fields[2]
            elif in_post:
                if not line:
                    in_post = False

@ -249,45 +271,50 @@ def dc2fields(file):
            # remove seconds
            post_creadt = ':'.join(post_creadt.split(':')[0:2])

            author = ""
            author = ''
            categories = []
            tags = []

            if cat_id:
                categories = [category_list[id].strip() for id in cat_id.split(',')]
                categories = [category_list[id].strip() for id
                              in cat_id.split(',')]

            # Get tags related to a post
            tag = post_meta.replace('{', '').replace('}', '').replace('a:1:s:3:\\"tag\\";a:', '').replace('a:0:', '')
            tag = (post_meta.replace('{', '')
                            .replace('}', '')
                            .replace('a:1:s:3:\\"tag\\";a:', '')
                            .replace('a:0:', ''))
            if len(tag) > 1:
                if int(len(tag[:1])) == 1:
                    newtag = tag.split('"')[1]
                    tags.append(
                        BeautifulSoup(
                            newtag
                            , "xml"
                            newtag,
                            'xml'
                        )
                        # bs4 always outputs UTF-8
                        .decode('utf-8')
                    )
                else:
                    i=1
                    j=1
                    i = 1
                    j = 1
                    while(i <= int(tag[:1])):
                        newtag = tag.split('"')[j].replace('\\','')
                        newtag = tag.split('"')[j].replace('\\', '')
                        tags.append(
                            BeautifulSoup(
                                newtag
                                , "xml"
                                newtag,
                                'xml'
                            )
                            # bs4 always outputs UTF-8
                            .decode('utf-8')
                        )
                        i=i+1
                        if j < int(tag[:1])*2:
                            j=j+2
                        i = i + 1
                        if j < int(tag[:1]) * 2:
                            j = j + 2

            """
            dotclear2 does not use markdown by default unless you use the markdown plugin
            dotclear2 does not use markdown by default unless
            you use the markdown plugin
            Ref: http://plugins.dotaddict.org/dc2/details/formatting-markdown
            """
            if post_format == "markdown":

@ -322,12 +349,13 @@ def posterous2fields(api_token, email, password):
        # py2 import
        import urllib2 as urllib_request

    def get_posterous_posts(api_token, email, password, page = 1):
        base64string = base64.encodestring(("%s:%s" % (email, password)).encode('utf-8')).replace(b'\n', b'')
        url = "http://posterous.com/api/v2/users/me/sites/primary/posts?api_token=%s&page=%d" % (api_token, page)
    def get_posterous_posts(api_token, email, password, page=1):
        base64string = base64.encodestring(
            ("%s:%s" % (email, password)).encode('utf-8')).replace('\n', '')
        url = ("http://posterous.com/api/v2/users/me/sites/primary/"
               "posts?api_token=%s&page=%d") % (api_token, page)
        request = urllib_request.Request(url)
        request.add_header("Authorization", "Basic %s" % base64string.decode())
        request.add_header('Authorization', 'Basic %s' % base64string.decode())
        handle = urllib_request.urlopen(request)
        posts = json.loads(handle.read().decode('utf-8'))
        return posts

@ -344,16 +372,18 @@ def posterous2fields(api_token, email, password):
        slug = slugify(post.get('title'))
        tags = [tag.get('name') for tag in post.get('tags')]
        raw_date = post.get('display_date')
        date_object = SafeDatetime.strptime(raw_date[:-6], "%Y/%m/%d %H:%M:%S")
        date_object = SafeDatetime.strptime(
            raw_date[:-6], '%Y/%m/%d %H:%M:%S')
        offset = int(raw_date[-5:])
        delta = timedelta(hours = offset / 100)
        delta = timedelta(hours=(offset / 100))
        date_object -= delta
        date = date_object.strftime("%Y-%m-%d %H:%M")
        kind = 'article'  # TODO: Recognise pages
        date = date_object.strftime('%Y-%m-%d %H:%M')
        kind = 'article'  # TODO: Recognise pages
        status = 'published'  # TODO: Find a way for draft posts

        yield (post.get('title'), post.get('body_cleaned'), slug, date,
               post.get('user').get('display_name'), [], tags, status, kind, "html")
        yield (post.get('title'), post.get('body_cleaned'),
               slug, date, post.get('user').get('display_name'),
               [], tags, status, kind, 'html')


def tumblr2fields(api_key, blogname):

@ -374,7 +404,9 @@ def tumblr2fields(api_key, blogname):
        import urllib2 as urllib_request

    def get_tumblr_posts(api_key, blogname, offset=0):
        url = "http://api.tumblr.com/v2/blog/%s.tumblr.com/posts?api_key=%s&offset=%d&filter=raw" % (blogname, api_key, offset)
        url = ("http://api.tumblr.com/v2/blog/%s.tumblr.com/"
               "posts?api_key=%s&offset=%d&filter=raw") % (
            blogname, api_key, offset)
        request = urllib_request.Request(url)
        handle = urllib_request.urlopen(request)
        posts = json.loads(handle.read().decode('utf-8'))

@ -384,7 +416,10 @@ def tumblr2fields(api_key, blogname):
    posts = get_tumblr_posts(api_key, blogname, offset)
    while len(posts) > 0:
        for post in posts:
            title = post.get('title') or post.get('source_title') or post.get('type').capitalize()
            title = \
                post.get('title') or \
                post.get('source_title') or \
                post.get('type').capitalize()
            slug = post.get('slug') or slugify(title)
            tags = post.get('tags')
            timestamp = post.get('timestamp')

@ -398,7 +433,11 @@ def tumblr2fields(api_key, blogname):
                    fmtstr = ''
                else:
                    fmtstr = '<img alt="%s" src="%s" />'
                content = '\n'.join(fmtstr % (photo.get('caption'), photo.get('original_size').get('url')) for photo in post.get('photos'))
                content = ''
                for photo in post.get('photos'):
                    content += '\n'.join(
                        fmtstr % (photo.get('caption'),
                                  photo.get('original_size').get('url')))
                content += '\n\n' + post.get('caption')
            elif type == 'quote':
                if format == 'markdown':

@ -417,16 +456,29 @@ def tumblr2fields(api_key, blogname):
                    fmtstr = '[via](%s)\n\n'
                else:
                    fmtstr = '<p><a href="%s">via</a></p>\n'
                content = fmtstr % post.get('source_url') + post.get('caption') + post.get('player')
                content = fmtstr % post.get('source_url') + \
                    post.get('caption') + \
                    post.get('player')
            elif type == 'video':
                if format == 'markdown':
                    fmtstr = '[via](%s)\n\n'
                else:
                    fmtstr = '<p><a href="%s">via</a></p>\n'
                content = fmtstr % post.get('source_url') + post.get('caption') + '\n'.join(player.get('embed_code') for player in post.get('player'))
                source = fmtstr % post.get('source_url')
                caption = post.get('caption')
                players = '\n'.join(player.get('embed_code')
                                    for player in post.get('player'))
                content = source + caption + players
            elif type == 'answer':
                title = post.get('question')
                content = '<p><a href="%s" rel="external nofollow">%s</a>: %s</p>\n%s' % (post.get('asking_name'), post.get('asking_url'), post.get('question'), post.get('answer'))
                content = ('<p>'
                           '<a href="%s" rel="external nofollow">%s</a>'
                           ': %s'
                           '</p>\n'
                           ' %s' % (post.get('asking_name'),
                                    post.get('asking_url'),
                                    post.get('question'),
                                    post.get('answer')))

            content = content.rstrip() + '\n'
            kind = 'article'

@ -438,25 +490,30 @@ def tumblr2fields(api_key, blogname):
            offset += len(posts)
            posts = get_tumblr_posts(api_key, blogname, offset)


def feed2fields(file):
    """Read a feed and yield pelican fields"""
    import feedparser
    d = feedparser.parse(file)
    for entry in d.entries:
        date = (time.strftime("%Y-%m-%d %H:%M", entry.updated_parsed)
                if hasattr(entry, "updated_parsed") else None)
        author = entry.author if hasattr(entry, "author") else None
        tags = [e['term'] for e in entry.tags] if hasattr(entry, "tags") else None
        date = (time.strftime('%Y-%m-%d %H:%M', entry.updated_parsed)
                if hasattr(entry, 'updated_parsed') else None)
        author = entry.author if hasattr(entry, 'author') else None
        tags = ([e['term'] for e in entry.tags]
                if hasattr(entry, 'tags') else None)

        slug = slugify(entry.title)
        kind = 'article'
        yield (entry.title, entry.description, slug, date, author, [], tags, None,
               kind, "html")
        yield (entry.title, entry.description, slug, date,
               author, [], tags, None, kind, 'html')


def build_header(title, date, author, categories, tags, slug,
                 status=None, attachments=None):
    """Build a header from a list of fields"""

def build_header(title, date, author, categories, tags, slug, status=None, attachments=None):
    from docutils.utils import column_width

    """Build a header from a list of fields"""
    header = '%s\n%s\n' % (title, '#' * column_width(title))
    if date:
        header += ':date: %s\n' % date

@ -475,8 +532,9 @@ def build_header(title, date, author, categories, tags, slug, status=None, attac
        header += '\n'
    return header

def build_markdown_header(title, date, author, categories, tags, slug, status=None,
                          attachments=None):

def build_markdown_header(title, date, author, categories, tags,
                          slug, status=None, attachments=None):
    """Build a header from a list of fields"""
    header = 'Title: %s\n' % title
    if date:

@ -496,6 +554,7 @@ def build_markdown_header(title, date, author, categories, tags, slug, status=No
        header += '\n'
    return header


def get_ext(out_markup, in_markup='html'):
    if in_markup == 'markdown' or out_markup == 'markdown':
        ext = '.md'

@ -503,26 +562,27 @@ def get_ext(out_markup, in_markup='html'):
        ext = '.rst'
    return ext


def get_out_filename(output_path, filename, ext, kind,
                     dirpage, dircat, categories, wp_custpost):
                    dirpage, dircat, categories, wp_custpost):
    filename = os.path.basename(filename)

    # Enforce filename restrictions for various filesystems at once; see
    # http://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words
    # we do not need to filter words because an extension will be appended
    filename = re.sub(r'[<>:"/\\|?*^% ]', '-', filename) # invalid chars
    filename = filename.lstrip('.') # should not start with a dot
    filename = re.sub(r'[<>:"/\\|?*^% ]', '-', filename)  # invalid chars
    filename = filename.lstrip('.')  # should not start with a dot
    if not filename:
        filename = '_'
    filename = filename[:249]   # allow for 5 extra characters
    filename = filename[:249]  # allow for 5 extra characters

    out_filename = os.path.join(output_path, filename+ext)
    out_filename = os.path.join(output_path, filename + ext)
    # option to put page posts in pages/ subdirectory
    if dirpage and kind == 'page':
        pages_dir = os.path.join(output_path, 'pages')
        if not os.path.isdir(pages_dir):
            os.mkdir(pages_dir)
        out_filename = os.path.join(pages_dir, filename+ext)
        out_filename = os.path.join(pages_dir, filename + ext)
    elif not dirpage and kind == 'page':
        pass
    # option to put wp custom post types in directories with post type

@ -539,18 +599,19 @@ def get_out_filename(output_path, filename, ext, kind,
        else:
            catname = ''
        out_filename = os.path.join(output_path, typename,
                                    catname, filename+ext)
                                    catname, filename + ext)
        if not os.path.isdir(os.path.join(output_path, typename, catname)):
            os.makedirs(os.path.join(output_path, typename, catname))
    # option to put files in directories with categories names
    elif dircat and (len(categories) > 0):
        catname = slugify(categories[0])
        out_filename = os.path.join(output_path, catname, filename+ext)
        out_filename = os.path.join(output_path, catname, filename + ext)
        if not os.path.isdir(os.path.join(output_path, catname)):
            os.mkdir(os.path.join(output_path, catname))

    return out_filename


def get_attachments(xml):
    """returns a dictionary of posts that have attachments with a list
    of the attachment_urls

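get_out_filename, reflowed in the hunks above, is also the importer's filesystem-safety net: a single character class swaps out the reserved characters collected from the Wikipedia reference, leading dots are stripped so the file is not hidden, and the name is truncated to leave room for an extension. The same three steps in isolation, on a made-up input:

    import re

    filename = '..what: a post?/draft*.html'

    filename = re.sub(r'[<>:"/\\|?*^% ]', '-', filename)  # invalid chars
    filename = filename.lstrip('.')  # should not start with a dot
    if not filename:
        filename = '_'
    filename = filename[:249]  # allow for 5 extra characters

    assert filename == 'what--a-post--draft-.html'
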
@ -566,7 +627,7 @@ def get_attachments(xml):

        if kind == 'attachment':
            attachments.append((item.find('post_parent').string,
                item.find('attachment_url').string))
                                item.find('attachment_url').string))
        else:
            filename = get_filename(filename, post_id)
            names[post_id] = filename

@ -575,7 +636,7 @@ def get_attachments(xml):
        try:
            parent_name = names[parent]
        except KeyError:
            #attachment's parent is not a valid post
            # attachment's parent is not a valid post
            parent_name = None

        try:

@ -585,6 +646,7 @@ def get_attachments(xml):
            attachedposts[parent_name].append(url)
    return attachedposts


def download_attachments(output_path, urls):
    """Downloads WordPress attachments and returns a list of paths to
    attachments that can be associated with a post (relative path to output

@ -592,8 +654,8 @@ def download_attachments(output_path, urls):
    locations = []
    for url in urls:
        path = urlparse(url).path
        #teardown path and rebuild to negate any errors with
        #os.path.join and leading /'s
        # teardown path and rebuild to negate any errors with
        # os.path.join and leading /'s
        path = path.split('/')
        filename = path.pop(-1)
        localpath = ''

@ -608,12 +670,13 @@ def download_attachments(output_path, urls):
            urlretrieve(url, os.path.join(full_path, filename))
            locations.append(os.path.join(localpath, filename))
        except (URLError, IOError) as e:
            #Python 2.7 throws an IOError rather Than URLError
            # Python 2.7 throws an IOError rather Than URLError
            logger.warning("No file could be downloaded from %s\n%s", url, e)
    return locations


def fields2pelican(fields, out_markup, output_path,
def fields2pelican(
        fields, out_markup, output_path,
        dircat=False, strip_raw=False, disable_slugs=False,
        dirpage=False, filename_template=None, filter_author=None,
        wp_custpost=False, wp_attach=False, attachments=None):

@ -634,24 +697,26 @@

        ext = get_ext(out_markup, in_markup)
        if ext == '.md':
            header = build_markdown_header(title, date, author, categories,
                                           tags, slug, status, attached_files)
            header = build_markdown_header(
                title, date, author, categories, tags, slug,
                status, attached_files)
        else:
            out_markup = "rst"
            out_markup = 'rst'
            header = build_header(title, date, author, categories,
                                  tags, slug, status, attached_files)
                                  tags, slug, status, attached_files)

        out_filename = get_out_filename(output_path, filename, ext,
                kind, dirpage, dircat, categories, wp_custpost)
        out_filename = get_out_filename(
            output_path, filename, ext, kind, dirpage, dircat,
            categories, wp_custpost)
        print(out_filename)

        if in_markup in ("html", "wp-html"):
            html_filename = os.path.join(output_path, filename+'.html')
        if in_markup in ('html', 'wp-html'):
            html_filename = os.path.join(output_path, filename + '.html')

            with open(html_filename, 'w', encoding='utf-8') as fp:
                # Replace newlines with paragraphs wrapped with <p> so
                # HTML is valid before conversion
                if in_markup == "wp-html":
                if in_markup == 'wp-html':
                    new_content = decode_wp_content(content)
                else:
                    paragraphs = content.splitlines()

@ -660,79 +725,95 @@

                    fp.write(new_content)


            parse_raw = '--parse-raw' if not strip_raw else ''
            cmd = ('pandoc --normalize {0} --from=html'
                   ' --to={1} -o "{2}" "{3}"').format(
                parse_raw, out_markup, out_filename, html_filename)
                   ' --to={1} -o "{2}" "{3}"')
            cmd = cmd.format(parse_raw, out_markup,
                             out_filename, html_filename)

            try:
                rc = subprocess.call(cmd, shell=True)
                if rc < 0:
                    error = "Child was terminated by signal %d" % -rc
                    error = 'Child was terminated by signal %d' % -rc
                    exit(error)

                elif rc > 0:
                    error = "Please, check your Pandoc installation."
                    error = 'Please, check your Pandoc installation.'
                    exit(error)
            except OSError as e:
                error = "Pandoc execution failed: %s" % e
                error = 'Pandoc execution failed: %s' % e
                exit(error)

            os.remove(html_filename)

            with open(out_filename, 'r', encoding='utf-8') as fs:
                content = fs.read()
                if out_markup == "markdown":
                    # In markdown, to insert a <br />, end a line with two or more spaces & then a end-of-line
                    content = content.replace("\\\n ", "  \n")
                    content = content.replace("\\\n", "  \n")
                if out_markup == 'markdown':
                    # In markdown, to insert a <br />, end a line with two
                    # or more spaces & then a end-of-line
                    content = content.replace('\\\n ', '  \n')
                    content = content.replace('\\\n', '  \n')

        with open(out_filename, 'w', encoding='utf-8') as fs:
            fs.write(header + content)
    if wp_attach and attachments and None in attachments:
        print("downloading attachments that don't have a parent post")
        urls = attachments[None]
        orphan_galleries = download_attachments(output_path, urls)
        download_attachments(output_path, urls)


def main():
    parser = argparse.ArgumentParser(
        description="Transform feed, WordPress, Tumblr, Dotclear, or Posterous "
                    "files into reST (rst) or Markdown (md) files. Be sure to "
                    "have pandoc installed.",
        description="Transform feed, WordPress, Tumblr, Dotclear, or "
                    "Posterous files into reST (rst) or Markdown (md) files. "
                    "Be sure to have pandoc installed.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument(dest='input', help='The input file to read')
    parser.add_argument('--wpfile', action='store_true', dest='wpfile',
    parser.add_argument(
        dest='input', help='The input file to read')
    parser.add_argument(
        '--wpfile', action='store_true', dest='wpfile',
        help='Wordpress XML export')
    parser.add_argument('--dotclear', action='store_true', dest='dotclear',
    parser.add_argument(
        '--dotclear', action='store_true', dest='dotclear',
        help='Dotclear export')
    parser.add_argument('--posterous', action='store_true', dest='posterous',
    parser.add_argument(
        '--posterous', action='store_true', dest='posterous',
        help='Posterous export')
    parser.add_argument('--tumblr', action='store_true', dest='tumblr',
    parser.add_argument(
        '--tumblr', action='store_true', dest='tumblr',
        help='Tumblr export')
    parser.add_argument('--feed', action='store_true', dest='feed',
    parser.add_argument(
        '--feed', action='store_true', dest='feed',
        help='Feed to parse')
    parser.add_argument('-o', '--output', dest='output', default='output',
    parser.add_argument(
        '-o', '--output', dest='output', default='output',
        help='Output path')
    parser.add_argument('-m', '--markup', dest='markup', default='rst',
    parser.add_argument(
        '-m', '--markup', dest='markup', default='rst',
        help='Output markup format (supports rst & markdown)')
    parser.add_argument('--dir-cat', action='store_true', dest='dircat',
    parser.add_argument(
        '--dir-cat', action='store_true', dest='dircat',
        help='Put files in directories with categories name')
    parser.add_argument('--dir-page', action='store_true', dest='dirpage',
    parser.add_argument(
        '--dir-page', action='store_true', dest='dirpage',
        help=('Put files recognised as pages in "pages/" sub-directory'
              ' (wordpress import only)'))
    parser.add_argument('--filter-author', dest='author',
    parser.add_argument(
        '--filter-author', dest='author',
        help='Import only post from the specified author')
    parser.add_argument('--strip-raw', action='store_true', dest='strip_raw',
    parser.add_argument(
        '--strip-raw', action='store_true', dest='strip_raw',
        help="Strip raw HTML code that can't be converted to "
             "markup such as flash embeds or iframes (wordpress import only)")
    parser.add_argument('--wp-custpost', action='store_true',
    parser.add_argument(
        '--wp-custpost', action='store_true',
        dest='wp_custpost',
        help='Put wordpress custom post types in directories. If used with '
             '--dir-cat option directories will be created as '
             '/post_type/category/ (wordpress import only)')
    parser.add_argument('--wp-attach', action='store_true', dest='wp_attach',
    parser.add_argument(
        '--wp-attach', action='store_true', dest='wp_attach',
        help='(wordpress import only) Download files uploaded to wordpress as '
             'attachments. Files will be added to posts as a list in the post '
             'header. All files will be downloaded, even if '

@ -740,16 +821,20 @@ def main():
             'with their original path inside the output directory. '
             'e.g. output/wp-uploads/date/postname/file.jpg '
             '-- Requires an internet connection --')
    parser.add_argument('--disable-slugs', action='store_true',
    parser.add_argument(
        '--disable-slugs', action='store_true',
        dest='disable_slugs',
        help='Disable storing slugs from imported posts within output. '
             'With this disabled, your Pelican URLs may not be consistent '
             'with your original posts.')
    parser.add_argument('-e', '--email', dest='email',
    parser.add_argument(
        '-e', '--email', dest='email',
        help="Email address (posterous import only)")
    parser.add_argument('-p', '--password', dest='password',
    parser.add_argument(
        '-p', '--password', dest='password',
        help="Password (posterous import only)")
    parser.add_argument('-b', '--blogname', dest='blogname',
    parser.add_argument(
        '-b', '--blogname', dest='blogname',
        help="Blog name (Tumblr import only)")

    args = parser.parse_args()

@ -766,18 +851,20 @@ def main():
    elif args.feed:
        input_type = 'feed'
    else:
        error = "You must provide either --wpfile, --dotclear, --posterous, --tumblr or --feed options"
        error = ('You must provide either --wpfile, --dotclear, '
                 '--posterous, --tumblr or --feed options')
        exit(error)

    if not os.path.exists(args.output):
        try:
            os.mkdir(args.output)
        except OSError:
            error = "Unable to create the output folder: " + args.output
            error = 'Unable to create the output folder: ' + args.output
            exit(error)

    if args.wp_attach and input_type != 'wordpress':
        error = "You must be importing a wordpress xml to use the --wp-attach option"
        error = ('You must be importing a wordpress xml '
                 'to use the --wp-attach option')
        exit(error)

    if input_type == 'wordpress':

@ -796,14 +883,14 @@ def main():
    else:
        attachments = None

    init()  # init logging

    # init logging
    init()
    fields2pelican(fields, args.markup, args.output,
                   dircat=args.dircat or False,
                   dirpage=args.dirpage or False,
                   strip_raw=args.strip_raw or False,
                   disable_slugs=args.disable_slugs or False,
                   filter_author=args.author,
                   wp_custpost = args.wp_custpost or False,
                   wp_attach = args.wp_attach or False,
                   attachments = attachments or None)
                   wp_custpost=args.wp_custpost or False,
                   wp_attach=args.wp_attach or False,
                   attachments=attachments or None)

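The dominant change in main() above is one idiom: a long message is split into adjacent string literals inside parentheses, which the compiler concatenates into a single constant, so the 79-column limit is met without altering the text users see. A quick demonstration with one of the messages from this hunk:

    error = ('You must provide either --wpfile, --dotclear, '
             '--posterous, --tumblr or --feed options')

    # adjacent literals join with no newline at the split point
    assert '--dotclear, --posterous' in error
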
@ -1,18 +1,20 @@
#!/usr/bin/env python

# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import six
from __future__ import print_function, unicode_literals

import argparse
import codecs
import os
import string
import argparse
import sys
import codecs

import pytz

import six

from pelican import __version__


_TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              "templates")

@ -44,9 +46,10 @@ CONF = {
    'timezone': 'Europe/Paris'
}

#url for list of valid timezones
# url for list of valid timezones
_TZ_URL = 'http://en.wikipedia.org/wiki/List_of_tz_database_time_zones'


def _input_compat(prompt):
    if six.PY3:
        r = input(prompt)

@ -59,6 +62,7 @@ if six.PY3:
else:
    str_compat = unicode


# Create a 'marked' default path, to determine if someone has supplied
# a path on the command-line.
class _DEFAULT_PATH_TYPE(str_compat):

@ -66,6 +70,7 @@ class _DEFAULT_PATH_TYPE(str_compat):

_DEFAULT_PATH = _DEFAULT_PATH_TYPE(os.curdir)


def decoding_strings(f):
    def wrapper(*args, **kwargs):
        out = f(*args, **kwargs)

@ -164,7 +169,8 @@ def ask(question, answer=str_compat, default=None, l=None):
                print('You must enter an integer')
        return r
    else:
        raise NotImplemented('Argument `answer` must be str_compat, bool, or integer')
        raise NotImplemented(
            'Argument `answer` must be str_compat, bool, or integer')


def ask_timezone(question, default, tzurl):

@ -177,7 +183,8 @@ def ask_timezone(question, default, tzurl):
            r = pytz.all_timezones[lower_tz.index(r)]
            break
        else:
            print('Please enter a valid time zone:\n (check [{0}])'.format(tzurl))
            print('Please enter a valid time zone:\n'
                  ' (check [{0}])'.format(tzurl))
    return r

@ -186,13 +193,13 @@ def main():
        description="A kickstarter for Pelican",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-p', '--path', default=_DEFAULT_PATH,
            help="The path to generate the blog into")
                        help="The path to generate the blog into")
    parser.add_argument('-t', '--title', metavar="title",
            help='Set the title of the website')
                        help='Set the title of the website')
    parser.add_argument('-a', '--author', metavar="author",
            help='Set the author name of the website')
                        help='Set the author name of the website')
    parser.add_argument('-l', '--lang', metavar="lang",
            help='Set the default web site language')
                        help='Set the default web site language')

    args = parser.parse_args()

@ -214,50 +221,94 @@ needed by Pelican.
        'Will save to:\n%s\n' % CONF['basedir'])
    else:
        CONF['basedir'] = os.path.abspath(os.path.expanduser(
            ask('Where do you want to create your new web site?', answer=str_compat, default=args.path)))
            ask('Where do you want to create your new web site?',
                answer=str_compat, default=args.path)))

    CONF['sitename'] = ask('What will be the title of this web site?', answer=str_compat, default=args.title)
    CONF['author'] = ask('Who will be the author of this web site?', answer=str_compat, default=args.author)
    CONF['lang'] = ask('What will be the default language of this web site?', str_compat, args.lang or CONF['lang'], 2)
    CONF['sitename'] = ask('What will be the title of this web site?',
                           answer=str_compat, default=args.title)
    CONF['author'] = ask('Who will be the author of this web site?',
                         answer=str_compat, default=args.author)
    CONF['lang'] = ask('What will be the default language of this web site?',
                       str_compat, args.lang or CONF['lang'], 2)

    if ask('Do you want to specify a URL prefix? e.g., http://example.com ', answer=bool, default=True):
        CONF['siteurl'] = ask('What is your URL prefix? (see above example; no trailing slash)', str_compat, CONF['siteurl'])
    if ask('Do you want to specify a URL prefix? e.g., http://example.com ',
           answer=bool, default=True):
        CONF['siteurl'] = ask('What is your URL prefix? (see '
                              'above example; no trailing slash)',
                              str_compat, CONF['siteurl'])

    CONF['with_pagination'] = ask('Do you want to enable article pagination?', bool, bool(CONF['default_pagination']))
    CONF['with_pagination'] = ask('Do you want to enable article pagination?',
                                  bool, bool(CONF['default_pagination']))

    if CONF['with_pagination']:
        CONF['default_pagination'] = ask('How many articles per page do you want?', int, CONF['default_pagination'])
        CONF['default_pagination'] = ask('How many articles per page '
                                         'do you want?',
                                         int, CONF['default_pagination'])
    else:
        CONF['default_pagination'] = False

    CONF['timezone'] = ask_timezone('What is your time zone?', CONF['timezone'], _TZ_URL)
    CONF['timezone'] = ask_timezone('What is your time zone?',
                                    CONF['timezone'], _TZ_URL)

    automation = ask('Do you want to generate a Fabfile/Makefile to automate generation and publishing?', bool, True)
    develop = ask('Do you want an auto-reload & simpleHTTP script to assist with theme and site development?', bool, True)
    automation = ask('Do you want to generate a Fabfile/Makefile '
                     'to automate generation and publishing?', bool, True)
    develop = ask('Do you want an auto-reload & simpleHTTP script '
                  'to assist with theme and site development?', bool, True)

    if automation:
        if ask('Do you want to upload your website using FTP?', answer=bool, default=False):
            CONF['ftp_host'] = ask('What is the hostname of your FTP server?', str_compat, CONF['ftp_host'])
            CONF['ftp_user'] = ask('What is your username on that server?', str_compat, CONF['ftp_user'])
            CONF['ftp_target_dir'] = ask('Where do you want to put your web site on that server?', str_compat, CONF['ftp_target_dir'])
        if ask('Do you want to upload your website using SSH?', answer=bool, default=False):
            CONF['ssh_host'] = ask('What is the hostname of your SSH server?', str_compat, CONF['ssh_host'])
            CONF['ssh_port'] = ask('What is the port of your SSH server?', int, CONF['ssh_port'])
            CONF['ssh_user'] = ask('What is your username on that server?', str_compat, CONF['ssh_user'])
            CONF['ssh_target_dir'] = ask('Where do you want to put your web site on that server?', str_compat, CONF['ssh_target_dir'])
        if ask('Do you want to upload your website using Dropbox?', answer=bool, default=False):
            CONF['dropbox_dir'] = ask('Where is your Dropbox directory?', str_compat, CONF['dropbox_dir'])
        if ask('Do you want to upload your website using S3?', answer=bool, default=False):
            CONF['s3_bucket'] = ask('What is the name of your S3 bucket?', str_compat, CONF['s3_bucket'])
        if ask('Do you want to upload your website using Rackspace Cloud Files?', answer=bool, default=False):
            CONF['cloudfiles_username'] = ask('What is your Rackspace Cloud username?', str_compat, CONF['cloudfiles_username'])
            CONF['cloudfiles_api_key'] = ask('What is your Rackspace Cloud API key?', str_compat, CONF['cloudfiles_api_key'])
            CONF['cloudfiles_container'] = ask('What is the name of your Cloud Files container?', str_compat, CONF['cloudfiles_container'])
        if ask('Do you want to upload your website using GitHub Pages?', answer=bool, default=False):
            if ask('Is this your personal page (username.github.io)?', answer=bool, default=False):
                CONF['github_pages_branch'] = _GITHUB_PAGES_BRANCHES['personal']
        if ask('Do you want to upload your website using FTP?',
               answer=bool, default=False):
            CONF['ftp_host'] = ask('What is the hostname of your FTP server?',
                                   str_compat, CONF['ftp_host'])
            CONF['ftp_user'] = ask('What is your username on that server?',
                                   str_compat, CONF['ftp_user'])
            CONF['ftp_target_dir'] = ask('Where do you want to put your '
                                         'web site on that server?',
                                         str_compat, CONF['ftp_target_dir'])
        if ask('Do you want to upload your website using SSH?',
               answer=bool, default=False):
            CONF['ssh_host'] = ask('What is the hostname of your SSH server?',
                                   str_compat, CONF['ssh_host'])
            CONF['ssh_port'] = ask('What is the port of your SSH server?',
                                   int, CONF['ssh_port'])
            CONF['ssh_user'] = ask('What is your username on that server?',
                                   str_compat, CONF['ssh_user'])
            CONF['ssh_target_dir'] = ask('Where do you want to put your '
                                         'web site on that server?',
                                         str_compat, CONF['ssh_target_dir'])

        if ask('Do you want to upload your website using Dropbox?',
               answer=bool, default=False):
            CONF['dropbox_dir'] = ask('Where is your Dropbox directory?',
                                      str_compat, CONF['dropbox_dir'])

        if ask('Do you want to upload your website using S3?',
               answer=bool, default=False):
            CONF['s3_bucket'] = ask('What is the name of your S3 bucket?',
                                    str_compat, CONF['s3_bucket'])

        if ask('Do you want to upload your website using '
               'Rackspace Cloud Files?', answer=bool, default=False):
            CONF['cloudfiles_username'] = ask('What is your Rackspace '
                                              'Cloud username?', str_compat,
                                              CONF['cloudfiles_username'])
            CONF['cloudfiles_api_key'] = ask('What is your Rackspace '
                                             'Cloud API key?', str_compat,
                                             CONF['cloudfiles_api_key'])
            CONF['cloudfiles_container'] = ask('What is the name of your '
                                               'Cloud Files container?',
                                               str_compat,
                                               CONF['cloudfiles_container'])

        if ask('Do you want to upload your website using GitHub Pages?',
               answer=bool, default=False):
            if ask('Is this your personal page (username.github.io)?',
                   answer=bool, default=False):
                CONF['github_pages_branch'] = \
                    _GITHUB_PAGES_BRANCHES['personal']
            else:
                CONF['github_pages_branch'] = _GITHUB_PAGES_BRANCHES['project']
                CONF['github_pages_branch'] = \
                    _GITHUB_PAGES_BRANCHES['project']

    try:
        os.makedirs(os.path.join(CONF['basedir'], 'content'))

@ -270,7 +321,8 @@ needed by Pelican.
        print('Error: {0}'.format(e))

    try:
        with codecs.open(os.path.join(CONF['basedir'], 'pelicanconf.py'), 'w', 'utf-8') as fd:
        with codecs.open(os.path.join(CONF['basedir'], 'pelicanconf.py'),
                         'w', 'utf-8') as fd:
            conf_python = dict()
            for key, value in CONF.items():
                conf_python[key] = repr(value)

@ -283,7 +335,8 @@ needed by Pelican.
        print('Error: {0}'.format(e))

    try:
        with codecs.open(os.path.join(CONF['basedir'], 'publishconf.py'), 'w', 'utf-8') as fd:
        with codecs.open(os.path.join(CONF['basedir'], 'publishconf.py'),
                         'w', 'utf-8') as fd:
            for line in get_template('publishconf.py'):
                template = string.Template(line)
                fd.write(template.safe_substitute(CONF))

@ -293,7 +346,8 @@ needed by Pelican.

    if automation:
        try:
            with codecs.open(os.path.join(CONF['basedir'], 'fabfile.py'), 'w', 'utf-8') as fd:
            with codecs.open(os.path.join(CONF['basedir'], 'fabfile.py'),
                             'w', 'utf-8') as fd:
                for line in get_template('fabfile.py'):
                    template = string.Template(line)
                    fd.write(template.safe_substitute(CONF))

@ -301,7 +355,8 @@ needed by Pelican.
        except OSError as e:
            print('Error: {0}'.format(e))
        try:
            with codecs.open(os.path.join(CONF['basedir'], 'Makefile'), 'w', 'utf-8') as fd:
            with codecs.open(os.path.join(CONF['basedir'], 'Makefile'),
                             'w', 'utf-8') as fd:
                mkfile_template_name = 'Makefile'
                py_v = 'PY?=python'
                if six.PY3:

@ -323,7 +378,9 @@ needed by Pelican.
                value = '"' + value.replace('"', '\\"') + '"'
            conf_shell[key] = value
        try:
            with codecs.open(os.path.join(CONF['basedir'], 'develop_server.sh'), 'w', 'utf-8') as fd:
            with codecs.open(os.path.join(CONF['basedir'],
                                          'develop_server.sh'),
                             'w', 'utf-8') as fd:
                lines = list(get_template('develop_server.sh'))
                py_v = 'PY=${PY:-python}\n'
                if six.PY3:

@ -333,7 +390,10 @@ needed by Pelican.
                    template = string.Template(line)
                    fd.write(template.safe_substitute(conf_shell))
                fd.close()
                os.chmod((os.path.join(CONF['basedir'], 'develop_server.sh')), 493)  # mode 0o755

                # mode 0o755
                os.chmod((os.path.join(CONF['basedir'],
                                       'develop_server.sh')), 493)
        except OSError as e:
            print('Error: {0}'.format(e))

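One non-mechanical detail preserved by the quickstart changes above: _DEFAULT_PATH_TYPE subclasses the string type solely to "mark" the argparse default, letting the prompt logic distinguish a user who explicitly typed os.curdir from one who accepted the default. The class body sits outside the hunk shown, so this sketch marks it with a hypothetical boolean attribute:

    import os

    str_compat = str  # py2 binds this to unicode instead


    class _DEFAULT_PATH_TYPE(str_compat):
        is_default_path = True  # assumed marker attribute


    _DEFAULT_PATH = _DEFAULT_PATH_TYPE(os.curdir)

    assert _DEFAULT_PATH == os.curdir                        # compares as str
    assert getattr(_DEFAULT_PATH, 'is_default_path', False)  # carries the mark
    assert not getattr(os.curdir, 'is_default_path', False)
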
pelican/tools/pelican_themes.py
@@ -1,33 +1,12 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
-
-import six
+from __future__ import print_function, unicode_literals

 import argparse
 import os
 import shutil
 import sys

-try:
-    import pelican
-except:
-    err('Cannot import pelican.\nYou must install Pelican in order to run this script.', -1)
-
-
-global _THEMES_PATH
-_THEMES_PATH = os.path.join(
-    os.path.dirname(
-        os.path.abspath(
-            pelican.__file__
-        )
-    ),
-    'themes'
-)
-
-__version__ = '0.2'
-_BUILTIN_THEMES = ['simple', 'notmyidea']
-

 def err(msg, die=None):
     """Print an error message and exits if an exit code is given"""
@@ -35,43 +14,71 @@ def err(msg, die=None):
     if die:
         sys.exit((die if type(die) is int else 1))

+try:
+    import pelican
+except:
+    err('Cannot import pelican.\nYou must '
+        'install Pelican in order to run this script.',
+        -1)
+
+
+global _THEMES_PATH
+_THEMES_PATH = os.path.join(
+    os.path.dirname(
+        os.path.abspath(pelican.__file__)
+    ),
+    'themes'
+)
+
+__version__ = '0.2'
+_BUILTIN_THEMES = ['simple', 'notmyidea']
+

 def main():
     """Main function"""

-    parser = argparse.ArgumentParser(description="""Install themes for Pelican""")
+    parser = argparse.ArgumentParser(
+        description="""Install themes for Pelican""")

-    excl= parser.add_mutually_exclusive_group()
-    excl.add_argument('-l', '--list', dest='action', action="store_const", const='list',
+    excl = parser.add_mutually_exclusive_group()
+    excl.add_argument(
+        '-l', '--list', dest='action', action="store_const", const='list',
         help="Show the themes already installed and exit")
-    excl.add_argument('-p', '--path', dest='action', action="store_const", const='path',
+    excl.add_argument(
+        '-p', '--path', dest='action', action="store_const", const='path',
         help="Show the themes path and exit")
-    excl.add_argument('-V', '--version', action='version', version='pelican-themes v{0}'.format(__version__),
+    excl.add_argument(
+        '-V', '--version', action='version',
+        version='pelican-themes v{0}'.format(__version__),
         help='Print the version of this script')

-    parser.add_argument('-i', '--install', dest='to_install', nargs='+', metavar="theme path",
+    parser.add_argument(
+        '-i', '--install', dest='to_install', nargs='+', metavar="theme path",
         help='The themes to install')
-    parser.add_argument('-r', '--remove', dest='to_remove', nargs='+', metavar="theme name",
+    parser.add_argument(
+        '-r', '--remove', dest='to_remove', nargs='+', metavar="theme name",
         help='The themes to remove')
-    parser.add_argument('-U', '--upgrade', dest='to_upgrade', nargs='+',
-                        metavar="theme path", help='The themes to upgrade')
-    parser.add_argument('-s', '--symlink', dest='to_symlink', nargs='+', metavar="theme path",
-        help="Same as `--install', but create a symbolic link instead of copying the theme. Useful for theme development")
-    parser.add_argument('-c', '--clean', dest='clean', action="store_true",
+    parser.add_argument(
+        '-U', '--upgrade', dest='to_upgrade', nargs='+',
+        metavar="theme path", help='The themes to upgrade')
+    parser.add_argument(
+        '-s', '--symlink', dest='to_symlink', nargs='+', metavar="theme path",
+        help="Same as `--install', but create a symbolic link instead of "
+             "copying the theme. Useful for theme development")
+    parser.add_argument(
+        '-c', '--clean', dest='clean', action="store_true",
         help="Remove the broken symbolic links of the theme path")

-    parser.add_argument('-v', '--verbose', dest='verbose', action="store_true",
+    parser.add_argument(
+        '-v', '--verbose', dest='verbose',
+        action="store_true",
         help="Verbose output")

     args = parser.parse_args()

     to_install = args.to_install or args.to_upgrade
     to_sym = args.to_symlink or args.clean

     if args.action:
         if args.action is 'list':
             list_themes(args.verbose)
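The recurring fix in this hunk targets E501 (line too long): each argparse call is broken immediately after the opening parenthesis, with the arguments hang-indented four spaces. A minimal sketch of the pattern, reusing two calls from the hunk:

    import argparse

    # Break after the opening parenthesis; hang-indent the arguments.
    parser = argparse.ArgumentParser(
        description="Install themes for Pelican")
    parser.add_argument(
        '-v', '--verbose', dest='verbose', action="store_true",
        help="Verbose output")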
@@ -95,7 +102,7 @@ def main():
     if args.to_upgrade:
         if args.verbose:
             print('Upgrading themes...')

         for i in args.to_upgrade:
             install(i, v=args.verbose, u=True)
@@ -144,11 +151,13 @@ def list_themes(v=False):
 def remove(theme_name, v=False):
     """Removes a theme"""

-    theme_name = theme_name.replace('/','')
+    theme_name = theme_name.replace('/', '')
     target = os.path.join(_THEMES_PATH, theme_name)

     if theme_name in _BUILTIN_THEMES:
-        err(theme_name + ' is a builtin theme.\nYou cannot remove a builtin theme with this script, remove it by hand if you want.')
+        err(theme_name + ' is a builtin theme.\n'
+            'You cannot remove a builtin theme with this script, '
+            'remove it by hand if you want.')
     elif os.path.islink(target):
         if v:
             print('Removing link `' + target + "'")
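Splitting the long err() message works because Python concatenates adjacent string literals at compile time, so the rewrapped call passes an identical string. A quick self-contained check of that property (a sketch, not project code):

    # Adjacent literals merge into one string at compile time.
    msg = (' is a builtin theme.\n'
           'You cannot remove a builtin theme with this script, '
           'remove it by hand if you want.')
    assert msg.count('\n') == 1
    assert msg.endswith('if you want.')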
@@ -180,7 +189,8 @@ def install(path, v=False, u=False):
         install(path, v)
     else:
         if v:
-            print("Copying `{p}' to `{t}' ...".format(p=path, t=theme_path))
+            print("Copying '{p}' to '{t}' ...".format(p=path,
+                                                      t=theme_path))
         try:
             shutil.copytree(path, theme_path)
@@ -189,14 +199,18 @@ def install(path, v=False, u=False):
             for root, dirs, files in os.walk(theme_path):
                 for d in dirs:
                     dname = os.path.join(root, d)
-                    os.chmod(dname, 493) # 0o755
+                    os.chmod(dname, 493)  # 0o755
                 for f in files:
                     fname = os.path.join(root, f)
-                    os.chmod(fname, 420) # 0o644
+                    os.chmod(fname, 420)  # 0o644
         except OSError as e:
-            err("Cannot change permissions of files or directory in `{r}':\n{e}".format(r=theme_path, e=str(e)), die=False)
+            err("Cannot change permissions of files "
+                "or directory in `{r}':\n{e}".format(r=theme_path,
+                                                     e=str(e)),
+                die=False)
     except Exception as e:
-        err("Cannot copy `{p}' to `{t}':\n{e}".format(p=path, t=theme_path, e=str(e)))
+        err("Cannot copy `{p}' to `{t}':\n{e}".format(
+            p=path, t=theme_path, e=str(e)))


 def symlink(path, v=False):
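The decimal arguments to os.chmod here are the pre-0o-literal spellings of the usual octal modes, as the inline comments note. A self-contained check:

    # 493 and 420 are the decimal values of the octal permission modes.
    assert 493 == 0o755   # rwxr-xr-x: directories
    assert 420 == 0o644   # rw-r--r--: files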
@@ -212,11 +226,13 @@ def symlink(path, v=False):
             err(path + ' : already exists')
         else:
             if v:
-                print("Linking `{p}' to `{t}' ...".format(p=path, t=theme_path))
+                print("Linking `{p}' to `{t}' ...".format(
+                    p=path, t=theme_path))
             try:
                 os.symlink(path, theme_path)
             except Exception as e:
-                err("Cannot link `{p}' to `{t}':\n{e}".format(p=path, t=theme_path, e=str(e)))
+                err("Cannot link `{p}' to `{t}':\n{e}".format(
+                    p=path, t=theme_path, e=str(e)))


 def is_broken_link(path):
@@ -227,7 +243,7 @@ def is_broken_link(path):

 def clean(v=False):
     """Removes the broken symbolic links"""
-    c=0
+    c = 0
     for path in os.listdir(_THEMES_PATH):
         path = os.path.join(_THEMES_PATH, path)
         if os.path.islink(path):
@@ -236,9 +252,9 @@ def clean(v=False):
             print('Removing {0}'.format(path))
             try:
                 os.remove(path)
-            except OSError as e:
+            except OSError:
                 print('Error: cannot remove {0}'.format(path))
             else:
-                c+=1
+                c += 1

     print("\nRemoved {0} broken links".format(c))
pelican/urlwrappers.py
@@ -4,9 +4,10 @@ from __future__ import unicode_literals
 import functools
 import logging
 import os

 import six

-from pelican.utils import (slugify, python_2_unicode_compatible)
+from pelican.utils import python_2_unicode_compatible, slugify

 logger = logging.getLogger(__name__)
pelican/utils.py
@@ -1,29 +1,30 @@
 # -*- coding: utf-8 -*-
-from __future__ import unicode_literals, print_function
-import six
+from __future__ import print_function, unicode_literals

 import codecs
+import datetime
 import errno
 import fnmatch
 import locale
 import logging
 import os
-import pytz
+import pickle
 import re
 import shutil
 import sys
 import traceback
-import pickle
-import datetime

 from collections import Hashable
 from contextlib import contextmanager
-import dateutil.parser
 from functools import partial
 from itertools import groupby
-from jinja2 import Markup
 from operator import attrgetter
 from posixpath import join as posix_join

+import dateutil.parser
+
+from jinja2 import Markup
+
+import pytz
+
+import six
 from six.moves.html_parser import HTMLParser

 logger = logging.getLogger(__name__)
@@ -43,9 +44,9 @@ def strftime(date, date_format):
     formatting them with the date, (if necessary) decoding the output and
     replacing formatted output back.
     '''
+    def strip_zeros(x):
+        return x.lstrip('0') or '0'
     c89_directives = 'aAbBcdfHIjmMpSUwWxXyYzZ%'
-    strip_zeros = lambda x: x.lstrip('0') or '0'

     # grab candidate format options
     format_options = '%[-]?.'
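Replacing the assigned lambda with a def addresses flake8's E731 ("do not assign a lambda expression, use a def"); behaviour is unchanged, and the function now carries its own name in tracebacks. A quick check of the helper's semantics:

    def strip_zeros(x):
        return x.lstrip('0') or '0'

    assert strip_zeros('07') == '7'
    assert strip_zeros('00') == '0'   # the `or '0'` keeps '0' from becoming ''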
@@ -200,8 +201,8 @@ def deprecated_attribute(old, new, since=None, remove=None, doc=None):
                        ' and will be removed by version {}'.format(version))
         message.append('. Use {} instead.'.format(new))
         logger.warning(''.join(message))
-        logger.debug(''.join(
-            six.text_type(x) for x in traceback.format_stack()))
+        logger.debug(''.join(six.text_type(x) for x
+                             in traceback.format_stack()))

     def fget(self):
         _warn()
@@ -224,7 +225,7 @@ def get_date(string):
     """
     string = re.sub(' +', ' ', string)
     default = SafeDatetime.now().replace(hour=0, minute=0,
-            second=0, microsecond=0)
+                                         second=0, microsecond=0)
     try:
         return dateutil.parser.parse(string, default=default)
     except (TypeError, ValueError):
@@ -319,12 +320,12 @@ def copy(source, destination, ignores=None):

     for src_dir, subdirs, others in os.walk(source_):
         dst_dir = os.path.join(destination_,
-                os.path.relpath(src_dir, source_))
+                               os.path.relpath(src_dir, source_))

         subdirs[:] = (s for s in subdirs if not any(fnmatch.fnmatch(s, i)
                                                     for i in ignores))
-        others[:] = (o for o in others if not any(fnmatch.fnmatch(o, i)
-                    for i in ignores))
+        others[:] = (o for o in others if not any(fnmatch.fnmatch(o, i)
+                                                  for i in ignores))

         if not os.path.isdir(dst_dir):
             logger.info('Creating directory %s', dst_dir)
@@ -338,9 +339,11 @@ def copy(source, destination, ignores=None):
             logger.info('Copying %s to %s', src_path, dst_path)
             shutil.copy2(src_path, dst_path)
         else:
-            logger.warning('Skipped copy %s (not a file or directory) to %s',
+            logger.warning('Skipped copy %s (not a file or '
+                           'directory) to %s',
                            src_path, dst_path)


 def clean_output_dir(path, retention):
     """Remove all files from output directory except those in retention list"""
@@ -366,8 +369,8 @@ def clean_output_dir(path, retention):
                 shutil.rmtree(file)
                 logger.debug("Deleted directory %s", file)
             except Exception as e:
-                logger.error("Unable to delete directory %s; %s",
-                        file, e)
+                logger.error("Unable to delete directory %s; %s",
+                             file, e)
         elif os.path.isfile(file) or os.path.islink(file):
             try:
                 os.remove(file)
@@ -507,12 +510,12 @@ def process_translations(content_list, order_by=None):

     for slug, items in grouped_by_slugs:
         items = list(items)
-        # items with `translation` metadata will be used as translations…
+        # items with `translation` metadata will be used as translations...
         default_lang_items = list(filter(
-            lambda i: i.metadata.get('translation', 'false').lower()
-                == 'false',
-            items))
-        # …unless all items with that slug are translations
+            lambda i:
+            i.metadata.get('translation', 'false').lower() == 'false',
+            items))
+        # ...unless all items with that slug are translations
         if not default_lang_items:
             default_lang_items = items
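The rewrapped filter keeps the same predicate: an item counts as an original unless its translation metadata is explicitly 'true'. A simplified sketch using plain dicts in place of content objects (the metadata attribute is flattened away here):

    items = [{'translation': 'true'}, {'translation': 'false'}, {}]
    originals = list(filter(
        lambda i: i.get('translation', 'false').lower() == 'false',
        items))
    assert len(originals) == 2   # only the explicit 'true' item is a translation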
@@ -522,13 +525,14 @@ def process_translations(content_list, order_by=None):
             len_ = len(lang_items)
             if len_ > 1:
                 logger.warning('There are %s variants of "%s" with lang %s',
-                    len_, slug, lang)
+                               len_, slug, lang)
                 for x in lang_items:
                     logger.warning('\t%s', x.source_path)

         # find items with default language
-        default_lang_items = list(filter(attrgetter('in_default_lang'),
-                                         default_lang_items))
+        default_lang_items = list(filter(
+            attrgetter('in_default_lang'),
+            default_lang_items))

         # if there is no article with default language, take an other one
         if not default_lang_items:
@@ -536,10 +540,9 @@ def process_translations(content_list, order_by=None):

         if not slug:
             logger.warning(
-                'empty slug for %s. '
-                'You can fix this by adding a title or a slug to your '
-                'content',
-                default_lang_items[0].source_path)
+                'Empty slug for %s. You can fix this by '
+                'adding a title or a slug to your content',
+                default_lang_items[0].source_path)
         index.extend(default_lang_items)
         translations.extend([x for x in items if x not in default_lang_items])
         for a in items:
@@ -567,10 +570,12 @@ def process_translations(content_list, order_by=None):
                 index.sort(key=attrgetter(order_by),
                            reverse=order_reversed)
             except AttributeError:
-                logger.warning('There is no "%s" attribute in the item '
-                               'metadata. Defaulting to slug order.', order_by)
+                logger.warning(
+                    'There is no "%s" attribute in the item '
+                    'metadata. Defaulting to slug order.', order_by)
         else:
-            logger.warning('Invalid *_ORDER_BY setting (%s).'
-                           'Valid options are strings and functions.', order_by)
+            logger.warning(
+                'Invalid *_ORDER_BY setting (%s).'
+                'Valid options are strings and functions.', order_by)

     return index, translations
@@ -589,12 +594,12 @@ def folder_watcher(path, extensions, ignores=[]):
         dirs[:] = [x for x in dirs if not x.startswith(os.curdir)]

         for f in files:
-            if (f.endswith(tuple(extensions)) and
-                    not any(fnmatch.fnmatch(f, ignore) for ignore in ignores)):
-                try:
-                    yield os.stat(os.path.join(root, f)).st_mtime
-                except OSError as e:
-                    logger.warning('Caught Exception: %s', e)
+            if f.endswith(tuple(extensions)) and \
+                    not any(fnmatch.fnmatch(f, ignore) for ignore in ignores):
+                try:
+                    yield os.stat(os.path.join(root, f)).st_mtime
+                except OSError as e:
+                    logger.warning('Caught Exception: %s', e)

     LAST_MTIME = 0
     while True:
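This hunk swaps a parenthesised multi-line condition for a backslash continuation, presumably to satisfy the project's flake8 run; the two spellings are equivalent. A minimal runnable sketch with hypothetical values:

    import fnmatch

    f, extensions, ignores = 'post.rst', ('.rst', '.md'), ['*~']
    if f.endswith(tuple(extensions)) and \
            not any(fnmatch.fnmatch(f, ignore) for ignore in ignores):
        print('would report mtime for', f)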
pelican/writers.py
@@ -1,22 +1,24 @@
 # -*- coding: utf-8 -*-
-from __future__ import with_statement, unicode_literals, print_function
-import six
+from __future__ import print_function, unicode_literals, with_statement

-import os
 import logging
+import os
+
+from feedgenerator import Atom1Feed, Rss201rev2Feed
+
+from jinja2 import Markup
+
+import six
+from six.moves.urllib.parse import urlparse
+
+from pelican import signals
+from pelican.paginator import Paginator
+from pelican.utils import (get_relative_path, is_selected_for_writing,
+                           path_to_url, set_date_tzinfo)

 if not six.PY3:
     from codecs import open

-from feedgenerator import Atom1Feed, Rss201rev2Feed
-from jinja2 import Markup
-from six.moves.urllib.parse import urlparse
-
-from pelican.paginator import Paginator
-from pelican.utils import (get_relative_path, path_to_url, set_date_tzinfo,
-                           is_selected_for_writing)
-from pelican import signals
-
 logger = logging.getLogger(__name__)
@@ -119,10 +121,10 @@ class Writer(object):
             feed.write(fp, 'utf-8')
             logger.info('Writing %s', complete_path)

-            signals.feed_written.send(complete_path, context=context, feed=feed)
+            signals.feed_written.send(
+                complete_path, context=context, feed=feed)
         return feed

     def write_file(self, name, template, context, relative_urls=False,
                    paginated=None, override_output=False, **kwargs):
         """Render the template and write the file.
@@ -139,9 +141,10 @@ class Writer(object):
         :param **kwargs: additional variables to pass to the templates
         """

-        if name is False or name == "" or\
-           not is_selected_for_writing(self.settings,\
-               os.path.join(self.output_path, name)):
+        if name is False or \
+           name == "" or \
+           not is_selected_for_writing(self.settings,
+                                       os.path.join(self.output_path, name)):
             return
         elif not name:
             # other stuff, just return for now
@@ -169,7 +172,8 @@ class Writer(object):

         def _get_localcontext(context, name, kwargs, relative_urls):
             localcontext = context.copy()
-            localcontext['localsiteurl'] = localcontext.get('localsiteurl', None)
+            localcontext['localsiteurl'] = localcontext.get(
+                'localsiteurl', None)
             if relative_urls:
                 relative_url = path_to_url(get_relative_path(name))
                 localcontext['SITEURL'] = relative_url
@@ -201,11 +205,13 @@ class Writer(object):
                     '%s_previous_page' % key: previous_page,
                     '%s_next_page' % key: next_page})

-                localcontext = _get_localcontext(context, page.save_as, paginated_kwargs, relative_urls)
+                localcontext = _get_localcontext(
+                    context, page.save_as, paginated_kwargs, relative_urls)
                 _write_file(template, localcontext, self.output_path,
                             page.save_as, override_output)
         else:
             # no pagination
-            localcontext = _get_localcontext(context, name, kwargs, relative_urls)
+            localcontext = _get_localcontext(
+                context, name, kwargs, relative_urls)
             _write_file(template, localcontext, self.output_path, name,
                         override_output)
tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = py{27,33,34},docs
+envlist = py{27,33,34},docs,flake8

 [testenv]
 basepython =
@@ -27,3 +27,16 @@ deps =
 changedir = docs
 commands =
     sphinx-build -W -b html -d {envtmpdir}/doctrees . _build/html
+
+[flake8]
+application-import-names = pelican
+import-order-style = cryptography
+
+[testenv:flake8]
+basepython = python2.7
+deps =
+    flake8 <= 2.4.1
+    git+https://github.com/public/flake8-import-order@2ac7052a4e02b4a8a0125a106d87465a3b9fd688
+commands =
+    flake8 --version
+    flake8 pelican
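With this environment in place, the lint step can also be reproduced locally by running `tox -e flake8`, which installs the pinned flake8 and flake8-import-order versions, prints the flake8 version, and then checks the `pelican` package with the [flake8] settings above.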