From cabdb26cee66e1173cf16cb31d3fe5f9fa4392e7 Mon Sep 17 00:00:00 2001
From: Chris Rose
Date: Sun, 29 Oct 2023 22:18:29 +0100
Subject: [PATCH] Apply code style to project via: ruff format .
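The whole tree was reformatted in a single pass from the repository
root:

    ruff format .

No behavioral changes are intended: the diff below is purely
mechanical (single quotes normalized to double quotes, long call
sites wrapped one argument per line with trailing commas, implicit
string concatenations reflowed, and multi-name imports expanded into
parenthesized one-per-line form). The exact output depends on the
Ruff version and on any [tool.ruff] configuration in pyproject.toml;
Ruff's defaults (88-column lines, double quotes) are assumed here.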
---
pelican/__init__.py | 530 +++---
pelican/__main__.py | 2 +-
pelican/cache.py | 52 +-
pelican/contents.py | 323 ++--
pelican/generators.py | 796 +++++----
pelican/log.py | 45 +-
pelican/paginator.py | 47 +-
pelican/plugins/_utils.py | 36 +-
pelican/plugins/signals.py | 56 +-
pelican/readers.py | 416 ++---
pelican/rstdirectives.py | 60 +-
pelican/server.py | 110 +-
pelican/settings.py | 819 +++++----
pelican/signals.py | 4 +-
pelican/tests/default_conf.py | 44 +-
.../pelican/plugins/ns_plugin/__init__.py | 2 +-
pelican/tests/support.py | 80 +-
pelican/tests/test_cache.py | 235 ++-
pelican/tests/test_cli.py | 77 +-
pelican/tests/test_contents.py | 870 +++++-----
pelican/tests/test_generators.py | 1500 ++++++++++-------
pelican/tests/test_importer.py | 638 ++++---
pelican/tests/test_log.py | 45 +-
pelican/tests/test_paginator.py | 111 +-
pelican/tests/test_pelican.py | 250 +--
pelican/tests/test_plugins.py | 118 +-
pelican/tests/test_readers.py | 853 +++++-----
pelican/tests/test_rstdirectives.py | 12 +-
pelican/tests/test_server.py | 36 +-
pelican/tests/test_settings.py | 292 ++--
pelican/tests/test_testsuite.py | 3 +-
pelican/tests/test_urlwrappers.py | 83 +-
pelican/tests/test_utils.py | 920 +++++-----
pelican/tools/pelican_import.py | 963 ++++++-----
pelican/tools/pelican_quickstart.py | 367 ++--
pelican/tools/pelican_themes.py | 181 +-
pelican/urlwrappers.py | 42 +-
pelican/utils.py | 323 ++--
pelican/writers.py | 185 +-
samples/pelican.conf.py | 52 +-
samples/pelican.conf_FR.py | 54 +-
41 files changed, 6487 insertions(+), 5145 deletions(-)
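Reviewer note: because this change is formatting-only, it can be
sanity-checked by confirming that every touched file parses to the
same AST before and after reformatting, which is the same safety
check Black applies in its default mode. A minimal sketch using only
the stdlib `ast` module; the demo values are illustrative and the
script is not part of this patch:

    # ast_check.py - minimal sketch: verify a reformat is
    # behavior-preserving by comparing ASTs before and after.
    import ast

    def same_ast(before_src: str, after_src: str) -> bool:
        """True if both sources parse to identical ASTs."""
        return ast.dump(ast.parse(before_src)) == ast.dump(ast.parse(after_src))

    if __name__ == "__main__":
        # Quote style, line wrapping, and implicit string concatenation
        # are all invisible to the AST, so these two compare equal:
        before = "DEFAULT_CONFIG_NAME = 'pelicanconf.py'\n"
        after = 'DEFAULT_CONFIG_NAME = "pelicanconf.py"\n'
        assert same_ast(before, after)
        print("ASTs match; the reformat is purely cosmetic.")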
diff --git a/pelican/__init__.py b/pelican/__init__.py
index fcdda8a4..a0ff4989 100644
--- a/pelican/__init__.py
+++ b/pelican/__init__.py
@@ -9,19 +9,25 @@ import sys
import time
import traceback
from collections.abc import Iterable
+
# Combines all paths to `pelican` package accessible from `sys.path`
# Makes it possible to install `pelican` and namespace plugins into different
# locations in the file system (e.g. pip with `-e` or `--user`)
from pkgutil import extend_path
+
__path__ = extend_path(__path__, __name__)
# pelican.log has to be the first pelican module to be loaded
# because logging.setLoggerClass has to be called before logging.getLogger
from pelican.log import console
from pelican.log import init as init_logging
-from pelican.generators import (ArticlesGenerator, # noqa: I100
- PagesGenerator, SourceFileGenerator,
- StaticGenerator, TemplatePagesGenerator)
+from pelican.generators import (
+ ArticlesGenerator, # noqa: I100
+ PagesGenerator,
+ SourceFileGenerator,
+ StaticGenerator,
+ TemplatePagesGenerator,
+)
from pelican.plugins import signals
from pelican.plugins._utils import get_plugin_name, load_plugins
from pelican.readers import Readers
@@ -35,12 +41,11 @@ try:
except Exception:
__version__ = "unknown"
-DEFAULT_CONFIG_NAME = 'pelicanconf.py'
+DEFAULT_CONFIG_NAME = "pelicanconf.py"
logger = logging.getLogger(__name__)
class Pelican:
-
def __init__(self, settings):
"""Pelican initialization
@@ -50,35 +55,34 @@ class Pelican:
# define the default settings
self.settings = settings
- self.path = settings['PATH']
- self.theme = settings['THEME']
- self.output_path = settings['OUTPUT_PATH']
- self.ignore_files = settings['IGNORE_FILES']
- self.delete_outputdir = settings['DELETE_OUTPUT_DIRECTORY']
- self.output_retention = settings['OUTPUT_RETENTION']
+ self.path = settings["PATH"]
+ self.theme = settings["THEME"]
+ self.output_path = settings["OUTPUT_PATH"]
+ self.ignore_files = settings["IGNORE_FILES"]
+ self.delete_outputdir = settings["DELETE_OUTPUT_DIRECTORY"]
+ self.output_retention = settings["OUTPUT_RETENTION"]
self.init_path()
self.init_plugins()
signals.initialized.send(self)
def init_path(self):
- if not any(p in sys.path for p in ['', os.curdir]):
+ if not any(p in sys.path for p in ["", os.curdir]):
logger.debug("Adding current directory to system path")
- sys.path.insert(0, '')
+ sys.path.insert(0, "")
def init_plugins(self):
self.plugins = []
for plugin in load_plugins(self.settings):
name = get_plugin_name(plugin)
- logger.debug('Registering plugin `%s`', name)
+ logger.debug("Registering plugin `%s`", name)
try:
plugin.register()
self.plugins.append(plugin)
except Exception as e:
- logger.error('Cannot register plugin `%s`\n%s',
- name, e)
+ logger.error("Cannot register plugin `%s`\n%s", name, e)
- self.settings['PLUGINS'] = [get_plugin_name(p) for p in self.plugins]
+ self.settings["PLUGINS"] = [get_plugin_name(p) for p in self.plugins]
def run(self):
"""Run the generators and return"""
@@ -87,10 +91,10 @@ class Pelican:
context = self.settings.copy()
# Share these among all the generators and content objects
# They map source paths to Content objects or None
- context['generated_content'] = {}
- context['static_links'] = set()
- context['static_content'] = {}
- context['localsiteurl'] = self.settings['SITEURL']
+ context["generated_content"] = {}
+ context["static_links"] = set()
+ context["static_content"] = {}
+ context["localsiteurl"] = self.settings["SITEURL"]
generators = [
cls(
@@ -99,23 +103,25 @@ class Pelican:
path=self.path,
theme=self.theme,
output_path=self.output_path,
- ) for cls in self._get_generator_classes()
+ )
+ for cls in self._get_generator_classes()
]
# Delete the output directory if (1) the appropriate setting is True
# and (2) that directory is not the parent of the source directory
- if (self.delete_outputdir
- and os.path.commonpath([os.path.realpath(self.output_path)]) !=
- os.path.commonpath([os.path.realpath(self.output_path),
- os.path.realpath(self.path)])):
+ if self.delete_outputdir and os.path.commonpath(
+ [os.path.realpath(self.output_path)]
+ ) != os.path.commonpath(
+ [os.path.realpath(self.output_path), os.path.realpath(self.path)]
+ ):
clean_output_dir(self.output_path, self.output_retention)
for p in generators:
- if hasattr(p, 'generate_context'):
+ if hasattr(p, "generate_context"):
p.generate_context()
for p in generators:
- if hasattr(p, 'refresh_metadata_intersite_links'):
+ if hasattr(p, "refresh_metadata_intersite_links"):
p.refresh_metadata_intersite_links()
signals.all_generators_finalized.send(generators)
@@ -123,61 +129,75 @@ class Pelican:
writer = self._get_writer()
for p in generators:
- if hasattr(p, 'generate_output'):
+ if hasattr(p, "generate_output"):
p.generate_output(writer)
signals.finalized.send(self)
- articles_generator = next(g for g in generators
- if isinstance(g, ArticlesGenerator))
- pages_generator = next(g for g in generators
- if isinstance(g, PagesGenerator))
+ articles_generator = next(
+ g for g in generators if isinstance(g, ArticlesGenerator)
+ )
+ pages_generator = next(g for g in generators if isinstance(g, PagesGenerator))
pluralized_articles = maybe_pluralize(
- (len(articles_generator.articles) +
- len(articles_generator.translations)),
- 'article',
- 'articles')
+ (len(articles_generator.articles) + len(articles_generator.translations)),
+ "article",
+ "articles",
+ )
pluralized_drafts = maybe_pluralize(
- (len(articles_generator.drafts) +
- len(articles_generator.drafts_translations)),
- 'draft',
- 'drafts')
+ (
+ len(articles_generator.drafts)
+ + len(articles_generator.drafts_translations)
+ ),
+ "draft",
+ "drafts",
+ )
pluralized_hidden_articles = maybe_pluralize(
- (len(articles_generator.hidden_articles) +
- len(articles_generator.hidden_translations)),
- 'hidden article',
- 'hidden articles')
+ (
+ len(articles_generator.hidden_articles)
+ + len(articles_generator.hidden_translations)
+ ),
+ "hidden article",
+ "hidden articles",
+ )
pluralized_pages = maybe_pluralize(
- (len(pages_generator.pages) +
- len(pages_generator.translations)),
- 'page',
- 'pages')
+ (len(pages_generator.pages) + len(pages_generator.translations)),
+ "page",
+ "pages",
+ )
pluralized_hidden_pages = maybe_pluralize(
- (len(pages_generator.hidden_pages) +
- len(pages_generator.hidden_translations)),
- 'hidden page',
- 'hidden pages')
+ (
+ len(pages_generator.hidden_pages)
+ + len(pages_generator.hidden_translations)
+ ),
+ "hidden page",
+ "hidden pages",
+ )
pluralized_draft_pages = maybe_pluralize(
- (len(pages_generator.draft_pages) +
- len(pages_generator.draft_translations)),
- 'draft page',
- 'draft pages')
+ (
+ len(pages_generator.draft_pages)
+ + len(pages_generator.draft_translations)
+ ),
+ "draft page",
+ "draft pages",
+ )
- console.print('Done: Processed {}, {}, {}, {}, {} and {} in {:.2f} seconds.'
- .format(
- pluralized_articles,
- pluralized_drafts,
- pluralized_hidden_articles,
- pluralized_pages,
- pluralized_hidden_pages,
- pluralized_draft_pages,
- time.time() - start_time))
+ console.print(
+ "Done: Processed {}, {}, {}, {}, {} and {} in {:.2f} seconds.".format(
+ pluralized_articles,
+ pluralized_drafts,
+ pluralized_hidden_articles,
+ pluralized_pages,
+ pluralized_hidden_pages,
+ pluralized_draft_pages,
+ time.time() - start_time,
+ )
+ )
def _get_generator_classes(self):
discovered_generators = [
(ArticlesGenerator, "internal"),
- (PagesGenerator, "internal")
+ (PagesGenerator, "internal"),
]
if self.settings["TEMPLATE_PAGES"]:
@@ -236,7 +256,7 @@ class PrintSettings(argparse.Action):
except Exception as e:
logger.critical("%s: %s", e.__class__.__name__, e)
console.print_exception()
- sys.exit(getattr(e, 'exitcode', 1))
+ sys.exit(getattr(e, "exitcode", 1))
if values:
# One or more arguments provided, so only print those settings
@@ -244,14 +264,16 @@ class PrintSettings(argparse.Action):
if setting in settings:
# Only add newline between setting name and value if dict
if isinstance(settings[setting], (dict, tuple, list)):
- setting_format = '\n{}:\n{}'
+ setting_format = "\n{}:\n{}"
else:
- setting_format = '\n{}: {}'
- console.print(setting_format.format(
- setting,
- pprint.pformat(settings[setting])))
+ setting_format = "\n{}: {}"
+ console.print(
+ setting_format.format(
+ setting, pprint.pformat(settings[setting])
+ )
+ )
else:
- console.print('\n{} is not a recognized setting.'.format(setting))
+ console.print("\n{} is not a recognized setting.".format(setting))
break
else:
# No argument was given to --print-settings, so print all settings
@@ -268,170 +290,258 @@ class ParseOverrides(argparse.Action):
k, v = item.split("=", 1)
except ValueError:
raise ValueError(
- 'Extra settings must be specified as KEY=VALUE pairs '
- f'but you specified {item}'
+ "Extra settings must be specified as KEY=VALUE pairs "
+ f"but you specified {item}"
)
try:
overrides[k] = json.loads(v)
except json.decoder.JSONDecodeError:
raise ValueError(
- f'Invalid JSON value: {v}. '
- 'Values specified via -e / --extra-settings flags '
- 'must be in JSON notation. '
- 'Use -e KEY=\'"string"\' to specify a string value; '
- '-e KEY=null to specify None; '
- '-e KEY=false (or true) to specify False (or True).'
+ f"Invalid JSON value: {v}. "
+ "Values specified via -e / --extra-settings flags "
+ "must be in JSON notation. "
+ "Use -e KEY='\"string\"' to specify a string value; "
+ "-e KEY=null to specify None; "
+ "-e KEY=false (or true) to specify False (or True)."
)
setattr(namespace, self.dest, overrides)
def parse_arguments(argv=None):
parser = argparse.ArgumentParser(
- description='A tool to generate a static blog, '
- ' with restructured text input files.',
- formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ description="A tool to generate a static blog, "
+ " with restructured text input files.",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
- parser.add_argument(dest='path', nargs='?',
- help='Path where to find the content files.',
- default=None)
+ parser.add_argument(
+ dest="path",
+ nargs="?",
+ help="Path where to find the content files.",
+ default=None,
+ )
- parser.add_argument('-t', '--theme-path', dest='theme',
- help='Path where to find the theme templates. If not '
- 'specified, it will use the default one included with '
- 'pelican.')
+ parser.add_argument(
+ "-t",
+ "--theme-path",
+ dest="theme",
+ help="Path where to find the theme templates. If not "
+ "specified, it will use the default one included with "
+ "pelican.",
+ )
- parser.add_argument('-o', '--output', dest='output',
- help='Where to output the generated files. If not '
- 'specified, a directory will be created, named '
- '"output" in the current path.')
+ parser.add_argument(
+ "-o",
+ "--output",
+ dest="output",
+ help="Where to output the generated files. If not "
+ "specified, a directory will be created, named "
+ '"output" in the current path.',
+ )
- parser.add_argument('-s', '--settings', dest='settings',
- help='The settings of the application, this is '
- 'automatically set to {} if a file exists with this '
- 'name.'.format(DEFAULT_CONFIG_NAME))
+ parser.add_argument(
+ "-s",
+ "--settings",
+ dest="settings",
+ help="The settings of the application, this is "
+ "automatically set to {} if a file exists with this "
+ "name.".format(DEFAULT_CONFIG_NAME),
+ )
- parser.add_argument('-d', '--delete-output-directory',
- dest='delete_outputdir', action='store_true',
- default=None, help='Delete the output directory.')
+ parser.add_argument(
+ "-d",
+ "--delete-output-directory",
+ dest="delete_outputdir",
+ action="store_true",
+ default=None,
+ help="Delete the output directory.",
+ )
- parser.add_argument('-v', '--verbose', action='store_const',
- const=logging.INFO, dest='verbosity',
- help='Show all messages.')
+ parser.add_argument(
+ "-v",
+ "--verbose",
+ action="store_const",
+ const=logging.INFO,
+ dest="verbosity",
+ help="Show all messages.",
+ )
- parser.add_argument('-q', '--quiet', action='store_const',
- const=logging.CRITICAL, dest='verbosity',
- help='Show only critical errors.')
+ parser.add_argument(
+ "-q",
+ "--quiet",
+ action="store_const",
+ const=logging.CRITICAL,
+ dest="verbosity",
+ help="Show only critical errors.",
+ )
- parser.add_argument('-D', '--debug', action='store_const',
- const=logging.DEBUG, dest='verbosity',
- help='Show all messages, including debug messages.')
+ parser.add_argument(
+ "-D",
+ "--debug",
+ action="store_const",
+ const=logging.DEBUG,
+ dest="verbosity",
+ help="Show all messages, including debug messages.",
+ )
- parser.add_argument('--version', action='version', version=__version__,
- help='Print the pelican version and exit.')
+ parser.add_argument(
+ "--version",
+ action="version",
+ version=__version__,
+ help="Print the pelican version and exit.",
+ )
- parser.add_argument('-r', '--autoreload', dest='autoreload',
- action='store_true',
- help='Relaunch pelican each time a modification occurs'
- ' on the content files.')
+ parser.add_argument(
+ "-r",
+ "--autoreload",
+ dest="autoreload",
+ action="store_true",
+ help="Relaunch pelican each time a modification occurs"
+ " on the content files.",
+ )
- parser.add_argument('--print-settings', dest='print_settings', nargs='*',
- action=PrintSettings, metavar='SETTING_NAME',
- help='Print current configuration settings and exit. '
- 'Append one or more setting name arguments to see the '
- 'values for specific settings only.')
+ parser.add_argument(
+ "--print-settings",
+ dest="print_settings",
+ nargs="*",
+ action=PrintSettings,
+ metavar="SETTING_NAME",
+ help="Print current configuration settings and exit. "
+ "Append one or more setting name arguments to see the "
+ "values for specific settings only.",
+ )
- parser.add_argument('--relative-urls', dest='relative_paths',
- action='store_true',
- help='Use relative urls in output, '
- 'useful for site development')
+ parser.add_argument(
+ "--relative-urls",
+ dest="relative_paths",
+ action="store_true",
+ help="Use relative urls in output, " "useful for site development",
+ )
- parser.add_argument('--cache-path', dest='cache_path',
- help=('Directory in which to store cache files. '
- 'If not specified, defaults to "cache".'))
+ parser.add_argument(
+ "--cache-path",
+ dest="cache_path",
+ help=(
+ "Directory in which to store cache files. "
+ 'If not specified, defaults to "cache".'
+ ),
+ )
- parser.add_argument('--ignore-cache', action='store_true',
- dest='ignore_cache', help='Ignore content cache '
- 'from previous runs by not loading cache files.')
+ parser.add_argument(
+ "--ignore-cache",
+ action="store_true",
+ dest="ignore_cache",
+ help="Ignore content cache " "from previous runs by not loading cache files.",
+ )
- parser.add_argument('-w', '--write-selected', type=str,
- dest='selected_paths', default=None,
- help='Comma separated list of selected paths to write')
+ parser.add_argument(
+ "-w",
+ "--write-selected",
+ type=str,
+ dest="selected_paths",
+ default=None,
+ help="Comma separated list of selected paths to write",
+ )
- parser.add_argument('--fatal', metavar='errors|warnings',
- choices=('errors', 'warnings'), default='',
- help=('Exit the program with non-zero status if any '
- 'errors/warnings encountered.'))
+ parser.add_argument(
+ "--fatal",
+ metavar="errors|warnings",
+ choices=("errors", "warnings"),
+ default="",
+ help=(
+ "Exit the program with non-zero status if any "
+ "errors/warnings encountered."
+ ),
+ )
- parser.add_argument('--logs-dedup-min-level', default='WARNING',
- choices=('DEBUG', 'INFO', 'WARNING', 'ERROR'),
- help=('Only enable log de-duplication for levels equal'
- ' to or above the specified value'))
+ parser.add_argument(
+ "--logs-dedup-min-level",
+ default="WARNING",
+ choices=("DEBUG", "INFO", "WARNING", "ERROR"),
+ help=(
+ "Only enable log de-duplication for levels equal"
+ " to or above the specified value"
+ ),
+ )
- parser.add_argument('-l', '--listen', dest='listen', action='store_true',
- help='Serve content files via HTTP and port 8000.')
+ parser.add_argument(
+ "-l",
+ "--listen",
+ dest="listen",
+ action="store_true",
+ help="Serve content files via HTTP and port 8000.",
+ )
- parser.add_argument('-p', '--port', dest='port', type=int,
- help='Port to serve HTTP files at. (default: 8000)')
+ parser.add_argument(
+ "-p",
+ "--port",
+ dest="port",
+ type=int,
+ help="Port to serve HTTP files at. (default: 8000)",
+ )
- parser.add_argument('-b', '--bind', dest='bind',
- help='IP to bind to when serving files via HTTP '
- '(default: 127.0.0.1)')
+ parser.add_argument(
+ "-b",
+ "--bind",
+ dest="bind",
+ help="IP to bind to when serving files via HTTP " "(default: 127.0.0.1)",
+ )
- parser.add_argument('-e', '--extra-settings', dest='overrides',
- help='Specify one or more SETTING=VALUE pairs to '
- 'override settings. VALUE must be in JSON notation: '
- 'specify string values as SETTING=\'"some string"\'; '
- 'booleans as SETTING=true or SETTING=false; '
- 'None as SETTING=null.',
- nargs='*',
- action=ParseOverrides,
- default={})
+ parser.add_argument(
+ "-e",
+ "--extra-settings",
+ dest="overrides",
+ help="Specify one or more SETTING=VALUE pairs to "
+ "override settings. VALUE must be in JSON notation: "
+ "specify string values as SETTING='\"some string\"'; "
+ "booleans as SETTING=true or SETTING=false; "
+ "None as SETTING=null.",
+ nargs="*",
+ action=ParseOverrides,
+ default={},
+ )
args = parser.parse_args(argv)
if args.port is not None and not args.listen:
- logger.warning('--port without --listen has no effect')
+ logger.warning("--port without --listen has no effect")
if args.bind is not None and not args.listen:
- logger.warning('--bind without --listen has no effect')
+ logger.warning("--bind without --listen has no effect")
return args
def get_config(args):
- """Builds a config dictionary based on supplied `args`.
- """
+ """Builds a config dictionary based on supplied `args`."""
config = {}
if args.path:
- config['PATH'] = os.path.abspath(os.path.expanduser(args.path))
+ config["PATH"] = os.path.abspath(os.path.expanduser(args.path))
if args.output:
- config['OUTPUT_PATH'] = \
- os.path.abspath(os.path.expanduser(args.output))
+ config["OUTPUT_PATH"] = os.path.abspath(os.path.expanduser(args.output))
if args.theme:
abstheme = os.path.abspath(os.path.expanduser(args.theme))
- config['THEME'] = abstheme if os.path.exists(abstheme) else args.theme
+ config["THEME"] = abstheme if os.path.exists(abstheme) else args.theme
if args.delete_outputdir is not None:
- config['DELETE_OUTPUT_DIRECTORY'] = args.delete_outputdir
+ config["DELETE_OUTPUT_DIRECTORY"] = args.delete_outputdir
if args.ignore_cache:
- config['LOAD_CONTENT_CACHE'] = False
+ config["LOAD_CONTENT_CACHE"] = False
if args.cache_path:
- config['CACHE_PATH'] = args.cache_path
+ config["CACHE_PATH"] = args.cache_path
if args.selected_paths:
- config['WRITE_SELECTED'] = args.selected_paths.split(',')
+ config["WRITE_SELECTED"] = args.selected_paths.split(",")
if args.relative_paths:
- config['RELATIVE_URLS'] = args.relative_paths
+ config["RELATIVE_URLS"] = args.relative_paths
if args.port is not None:
- config['PORT'] = args.port
+ config["PORT"] = args.port
if args.bind is not None:
- config['BIND'] = args.bind
- config['DEBUG'] = args.verbosity == logging.DEBUG
+ config["BIND"] = args.bind
+ config["DEBUG"] = args.verbosity == logging.DEBUG
config.update(args.overrides)
return config
def get_instance(args):
-
config_file = args.settings
if config_file is None and os.path.isfile(DEFAULT_CONFIG_NAME):
config_file = DEFAULT_CONFIG_NAME
@@ -439,9 +549,9 @@ def get_instance(args):
settings = read_settings(config_file, override=get_config(args))
- cls = settings['PELICAN_CLASS']
+ cls = settings["PELICAN_CLASS"]
if isinstance(cls, str):
- module, cls_name = cls.rsplit('.', 1)
+ module, cls_name = cls.rsplit(".", 1)
module = __import__(module)
cls = getattr(module, cls_name)
@@ -449,8 +559,10 @@ def get_instance(args):
def autoreload(args, excqueue=None):
- console.print(' --- AutoReload Mode: Monitoring `content`, `theme` and'
- ' `settings` for changes. ---')
+ console.print(
+ " --- AutoReload Mode: Monitoring `content`, `theme` and"
+ " `settings` for changes. ---"
+ )
pelican, settings = get_instance(args)
settings_file = os.path.abspath(args.settings)
while True:
@@ -463,8 +575,9 @@ def autoreload(args, excqueue=None):
if settings_file in changed_files:
pelican, settings = get_instance(args)
- console.print('\n-> Modified: {}. re-generating...'.format(
- ', '.join(changed_files)))
+ console.print(
+ "\n-> Modified: {}. re-generating...".format(", ".join(changed_files))
+ )
except KeyboardInterrupt:
if excqueue is not None:
@@ -473,15 +586,14 @@ def autoreload(args, excqueue=None):
raise
except Exception as e:
- if (args.verbosity == logging.DEBUG):
+ if args.verbosity == logging.DEBUG:
if excqueue is not None:
- excqueue.put(
- traceback.format_exception_only(type(e), e)[-1])
+ excqueue.put(traceback.format_exception_only(type(e), e)[-1])
else:
raise
logger.warning(
- 'Caught exception:\n"%s".', e,
- exc_info=settings.get('DEBUG', False))
+ 'Caught exception:\n"%s".', e, exc_info=settings.get("DEBUG", False)
+ )
def listen(server, port, output, excqueue=None):
@@ -491,8 +603,7 @@ def listen(server, port, output, excqueue=None):
RootedHTTPServer.allow_reuse_address = True
try:
- httpd = RootedHTTPServer(
- output, (server, port), ComplexHTTPRequestHandler)
+ httpd = RootedHTTPServer(output, (server, port), ComplexHTTPRequestHandler)
except OSError as e:
logging.error("Could not listen on port %s, server %s.", port, server)
if excqueue is not None:
@@ -500,8 +611,9 @@ def listen(server, port, output, excqueue=None):
return
try:
- console.print("Serving site at: http://{}:{} - Tap CTRL-C to stop".format(
- server, port))
+ console.print(
+ "Serving site at: http://{}:{} - Tap CTRL-C to stop".format(server, port)
+ )
httpd.serve_forever()
except Exception as e:
if excqueue is not None:
@@ -518,24 +630,31 @@ def listen(server, port, output, excqueue=None):
def main(argv=None):
args = parse_arguments(argv)
logs_dedup_min_level = getattr(logging, args.logs_dedup_min_level)
- init_logging(level=args.verbosity, fatal=args.fatal,
- name=__name__, logs_dedup_min_level=logs_dedup_min_level)
+ init_logging(
+ level=args.verbosity,
+ fatal=args.fatal,
+ name=__name__,
+ logs_dedup_min_level=logs_dedup_min_level,
+ )
- logger.debug('Pelican version: %s', __version__)
- logger.debug('Python version: %s', sys.version.split()[0])
+ logger.debug("Pelican version: %s", __version__)
+ logger.debug("Python version: %s", sys.version.split()[0])
try:
pelican, settings = get_instance(args)
if args.autoreload and args.listen:
excqueue = multiprocessing.Queue()
- p1 = multiprocessing.Process(
- target=autoreload,
- args=(args, excqueue))
+ p1 = multiprocessing.Process(target=autoreload, args=(args, excqueue))
p2 = multiprocessing.Process(
target=listen,
- args=(settings.get('BIND'), settings.get('PORT'),
- settings.get("OUTPUT_PATH"), excqueue))
+ args=(
+ settings.get("BIND"),
+ settings.get("PORT"),
+ settings.get("OUTPUT_PATH"),
+ excqueue,
+ ),
+ )
try:
p1.start()
p2.start()
@@ -548,16 +667,17 @@ def main(argv=None):
elif args.autoreload:
autoreload(args)
elif args.listen:
- listen(settings.get('BIND'), settings.get('PORT'),
- settings.get("OUTPUT_PATH"))
+ listen(
+ settings.get("BIND"), settings.get("PORT"), settings.get("OUTPUT_PATH")
+ )
else:
with console.status("Generating..."):
pelican.run()
except KeyboardInterrupt:
- logger.warning('Keyboard interrupt received. Exiting.')
+ logger.warning("Keyboard interrupt received. Exiting.")
except Exception as e:
logger.critical("%s: %s", e.__class__.__name__, e)
if args.verbosity == logging.DEBUG:
console.print_exception()
- sys.exit(getattr(e, 'exitcode', 1))
+ sys.exit(getattr(e, "exitcode", 1))
diff --git a/pelican/__main__.py b/pelican/__main__.py
index 69a5b95d..17aead3b 100644
--- a/pelican/__main__.py
+++ b/pelican/__main__.py
@@ -5,5 +5,5 @@ python -m pelican module entry point to run via python -m
from . import main
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/pelican/cache.py b/pelican/cache.py
index d2665691..d1f8550e 100644
--- a/pelican/cache.py
+++ b/pelican/cache.py
@@ -19,29 +19,35 @@ class FileDataCacher:
Sets caching policy according to *caching_policy*.
"""
self.settings = settings
- self._cache_path = os.path.join(self.settings['CACHE_PATH'],
- cache_name)
+ self._cache_path = os.path.join(self.settings["CACHE_PATH"], cache_name)
self._cache_data_policy = caching_policy
- if self.settings['GZIP_CACHE']:
+ if self.settings["GZIP_CACHE"]:
import gzip
+
self._cache_open = gzip.open
else:
self._cache_open = open
if load_policy:
try:
- with self._cache_open(self._cache_path, 'rb') as fhandle:
+ with self._cache_open(self._cache_path, "rb") as fhandle:
self._cache = pickle.load(fhandle)
except (OSError, UnicodeDecodeError) as err:
- logger.debug('Cannot load cache %s (this is normal on first '
- 'run). Proceeding with empty cache.\n%s',
- self._cache_path, err)
+ logger.debug(
+ "Cannot load cache %s (this is normal on first "
+ "run). Proceeding with empty cache.\n%s",
+ self._cache_path,
+ err,
+ )
self._cache = {}
except pickle.PickleError as err:
- logger.warning('Cannot unpickle cache %s, cache may be using '
- 'an incompatible protocol (see pelican '
- 'caching docs). '
- 'Proceeding with empty cache.\n%s',
- self._cache_path, err)
+ logger.warning(
+ "Cannot unpickle cache %s, cache may be using "
+ "an incompatible protocol (see pelican "
+ "caching docs). "
+ "Proceeding with empty cache.\n%s",
+ self._cache_path,
+ err,
+ )
self._cache = {}
else:
self._cache = {}
@@ -62,12 +68,13 @@ class FileDataCacher:
"""Save the updated cache"""
if self._cache_data_policy:
try:
- mkdir_p(self.settings['CACHE_PATH'])
- with self._cache_open(self._cache_path, 'wb') as fhandle:
+ mkdir_p(self.settings["CACHE_PATH"])
+ with self._cache_open(self._cache_path, "wb") as fhandle:
pickle.dump(self._cache, fhandle)
except (OSError, pickle.PicklingError, TypeError) as err:
- logger.warning('Could not save cache %s\n ... %s',
- self._cache_path, err)
+ logger.warning(
+ "Could not save cache %s\n ... %s", self._cache_path, err
+ )
class FileStampDataCacher(FileDataCacher):
@@ -80,8 +87,8 @@ class FileStampDataCacher(FileDataCacher):
super().__init__(settings, cache_name, caching_policy, load_policy)
- method = self.settings['CHECK_MODIFIED_METHOD']
- if method == 'mtime':
+ method = self.settings["CHECK_MODIFIED_METHOD"]
+ if method == "mtime":
self._filestamp_func = os.path.getmtime
else:
try:
@@ -89,12 +96,12 @@ class FileStampDataCacher(FileDataCacher):
def filestamp_func(filename):
"""return hash of file contents"""
- with open(filename, 'rb') as fhandle:
+ with open(filename, "rb") as fhandle:
return hash_func(fhandle.read()).digest()
self._filestamp_func = filestamp_func
except AttributeError as err:
- logger.warning('Could not get hashing function\n\t%s', err)
+ logger.warning("Could not get hashing function\n\t%s", err)
self._filestamp_func = None
def cache_data(self, filename, data):
@@ -115,9 +122,8 @@ class FileStampDataCacher(FileDataCacher):
try:
return self._filestamp_func(filename)
except (OSError, TypeError) as err:
- logger.warning('Cannot get modification stamp for %s\n\t%s',
- filename, err)
- return ''
+ logger.warning("Cannot get modification stamp for %s\n\t%s", filename, err)
+ return ""
def get_cached_data(self, filename, default=None):
"""Get the cached data for the given filename
diff --git a/pelican/contents.py b/pelican/contents.py
index c347a999..f99e6426 100644
--- a/pelican/contents.py
+++ b/pelican/contents.py
@@ -16,12 +16,19 @@ except ModuleNotFoundError:
from pelican.plugins import signals
from pelican.settings import DEFAULT_CONFIG
-from pelican.utils import (deprecated_attribute, memoized, path_to_url,
- posixize_path, sanitised_join, set_date_tzinfo,
- slugify, truncate_html_words)
+from pelican.utils import (
+ deprecated_attribute,
+ memoized,
+ path_to_url,
+ posixize_path,
+ sanitised_join,
+ set_date_tzinfo,
+ slugify,
+ truncate_html_words,
+)
# Import these so that they're available when you import from pelican.contents.
-from pelican.urlwrappers import (Author, Category, Tag, URLWrapper) # NOQA
+from pelican.urlwrappers import Author, Category, Tag, URLWrapper # NOQA
logger = logging.getLogger(__name__)
@@ -36,12 +43,14 @@ class Content:
:param context: The shared context between generators.
"""
- @deprecated_attribute(old='filename', new='source_path', since=(3, 2, 0))
+
+ @deprecated_attribute(old="filename", new="source_path", since=(3, 2, 0))
def filename():
return None
- def __init__(self, content, metadata=None, settings=None,
- source_path=None, context=None):
+ def __init__(
+ self, content, metadata=None, settings=None, source_path=None, context=None
+ ):
if metadata is None:
metadata = {}
if settings is None:
@@ -59,8 +68,8 @@ class Content:
# set metadata as attributes
for key, value in local_metadata.items():
- if key in ('save_as', 'url'):
- key = 'override_' + key
+ if key in ("save_as", "url"):
+ key = "override_" + key
setattr(self, key.lower(), value)
# also keep track of the metadata attributes available
@@ -71,53 +80,52 @@ class Content:
# First, read the authors from "authors", if not, fallback to "author"
# and if not use the settings defined one, if any.
- if not hasattr(self, 'author'):
- if hasattr(self, 'authors'):
+ if not hasattr(self, "author"):
+ if hasattr(self, "authors"):
self.author = self.authors[0]
- elif 'AUTHOR' in settings:
- self.author = Author(settings['AUTHOR'], settings)
+ elif "AUTHOR" in settings:
+ self.author = Author(settings["AUTHOR"], settings)
- if not hasattr(self, 'authors') and hasattr(self, 'author'):
+ if not hasattr(self, "authors") and hasattr(self, "author"):
self.authors = [self.author]
# XXX Split all the following code into pieces, there is too much here.
# manage languages
self.in_default_lang = True
- if 'DEFAULT_LANG' in settings:
- default_lang = settings['DEFAULT_LANG'].lower()
- if not hasattr(self, 'lang'):
+ if "DEFAULT_LANG" in settings:
+ default_lang = settings["DEFAULT_LANG"].lower()
+ if not hasattr(self, "lang"):
self.lang = default_lang
- self.in_default_lang = (self.lang == default_lang)
+ self.in_default_lang = self.lang == default_lang
# create the slug if not existing, generate slug according to
# setting of SLUG_ATTRIBUTE
- if not hasattr(self, 'slug'):
- if (settings['SLUGIFY_SOURCE'] == 'title' and
- hasattr(self, 'title')):
+ if not hasattr(self, "slug"):
+ if settings["SLUGIFY_SOURCE"] == "title" and hasattr(self, "title"):
value = self.title
- elif (settings['SLUGIFY_SOURCE'] == 'basename' and
- source_path is not None):
+ elif settings["SLUGIFY_SOURCE"] == "basename" and source_path is not None:
value = os.path.basename(os.path.splitext(source_path)[0])
else:
value = None
if value is not None:
self.slug = slugify(
value,
- regex_subs=settings.get('SLUG_REGEX_SUBSTITUTIONS', []),
- preserve_case=settings.get('SLUGIFY_PRESERVE_CASE', False),
- use_unicode=settings.get('SLUGIFY_USE_UNICODE', False))
+ regex_subs=settings.get("SLUG_REGEX_SUBSTITUTIONS", []),
+ preserve_case=settings.get("SLUGIFY_PRESERVE_CASE", False),
+ use_unicode=settings.get("SLUGIFY_USE_UNICODE", False),
+ )
self.source_path = source_path
self.relative_source_path = self.get_relative_source_path()
# manage the date format
- if not hasattr(self, 'date_format'):
- if hasattr(self, 'lang') and self.lang in settings['DATE_FORMATS']:
- self.date_format = settings['DATE_FORMATS'][self.lang]
+ if not hasattr(self, "date_format"):
+ if hasattr(self, "lang") and self.lang in settings["DATE_FORMATS"]:
+ self.date_format = settings["DATE_FORMATS"][self.lang]
else:
- self.date_format = settings['DEFAULT_DATE_FORMAT']
+ self.date_format = settings["DEFAULT_DATE_FORMAT"]
if isinstance(self.date_format, tuple):
locale_string = self.date_format[0]
@@ -129,22 +137,22 @@ class Content:
timezone = getattr(self, "timezone", default_timezone)
self.timezone = ZoneInfo(timezone)
- if hasattr(self, 'date'):
+ if hasattr(self, "date"):
self.date = set_date_tzinfo(self.date, timezone)
self.locale_date = self.date.strftime(self.date_format)
- if hasattr(self, 'modified'):
+ if hasattr(self, "modified"):
self.modified = set_date_tzinfo(self.modified, timezone)
self.locale_modified = self.modified.strftime(self.date_format)
# manage status
- if not hasattr(self, 'status'):
+ if not hasattr(self, "status"):
# Previous default of None broke comment plugins and perhaps others
- self.status = getattr(self, 'default_status', '')
+ self.status = getattr(self, "default_status", "")
# store the summary metadata if it is set
- if 'summary' in metadata:
- self._summary = metadata['summary']
+ if "summary" in metadata:
+ self._summary = metadata["summary"]
signals.content_object_init.send(self)
@@ -156,8 +164,8 @@ class Content:
for prop in self.mandatory_properties:
if not hasattr(self, prop):
logger.error(
- "Skipping %s: could not find information about '%s'",
- self, prop)
+ "Skipping %s: could not find information about '%s'", self, prop
+ )
return False
return True
@@ -183,12 +191,13 @@ class Content:
return True
def _has_valid_status(self):
- if hasattr(self, 'allowed_statuses'):
+ if hasattr(self, "allowed_statuses"):
if self.status not in self.allowed_statuses:
logger.error(
"Unknown status '%s' for file %s, skipping it. (Not in %s)",
self.status,
- self, self.allowed_statuses
+ self,
+ self.allowed_statuses,
)
return False
@@ -198,42 +207,48 @@ class Content:
def is_valid(self):
"""Validate Content"""
# Use all() to not short circuit and get results of all validations
- return all([self._has_valid_mandatory_properties(),
- self._has_valid_save_as(),
- self._has_valid_status()])
+ return all(
+ [
+ self._has_valid_mandatory_properties(),
+ self._has_valid_save_as(),
+ self._has_valid_status(),
+ ]
+ )
@property
def url_format(self):
"""Returns the URL, formatted with the proper values"""
metadata = copy.copy(self.metadata)
- path = self.metadata.get('path', self.get_relative_source_path())
- metadata.update({
- 'path': path_to_url(path),
- 'slug': getattr(self, 'slug', ''),
- 'lang': getattr(self, 'lang', 'en'),
- 'date': getattr(self, 'date', datetime.datetime.now()),
- 'author': self.author.slug if hasattr(self, 'author') else '',
- 'category': self.category.slug if hasattr(self, 'category') else ''
- })
+ path = self.metadata.get("path", self.get_relative_source_path())
+ metadata.update(
+ {
+ "path": path_to_url(path),
+ "slug": getattr(self, "slug", ""),
+ "lang": getattr(self, "lang", "en"),
+ "date": getattr(self, "date", datetime.datetime.now()),
+ "author": self.author.slug if hasattr(self, "author") else "",
+ "category": self.category.slug if hasattr(self, "category") else "",
+ }
+ )
return metadata
def _expand_settings(self, key, klass=None):
if not klass:
klass = self.__class__.__name__
- fq_key = ('{}_{}'.format(klass, key)).upper()
+ fq_key = ("{}_{}".format(klass, key)).upper()
return str(self.settings[fq_key]).format(**self.url_format)
def get_url_setting(self, key):
- if hasattr(self, 'override_' + key):
- return getattr(self, 'override_' + key)
- key = key if self.in_default_lang else 'lang_%s' % key
+ if hasattr(self, "override_" + key):
+ return getattr(self, "override_" + key)
+ key = key if self.in_default_lang else "lang_%s" % key
return self._expand_settings(key)
def _link_replacer(self, siteurl, m):
- what = m.group('what')
- value = urlparse(m.group('value'))
+ what = m.group("what")
+ value = urlparse(m.group("value"))
path = value.path
- origin = m.group('path')
+ origin = m.group("path")
# urllib.parse.urljoin() produces `a.html` for urljoin("..", "a.html")
# so if RELATIVE_URLS are enabled, we fall back to os.path.join() to
@@ -241,7 +256,7 @@ class Content:
# `baz/http://foo/bar.html` for join("baz", "http://foo/bar.html")
# instead of correct "http://foo/bar.html", so one has to pick a side
# as there is no silver bullet.
- if self.settings['RELATIVE_URLS']:
+ if self.settings["RELATIVE_URLS"]:
joiner = os.path.join
else:
joiner = urljoin
@@ -251,16 +266,17 @@ class Content:
# os.path.join()), so in order to get a correct answer one needs to
# append a trailing slash to siteurl in that case. This also makes
# the new behavior fully compatible with Pelican 3.7.1.
- if not siteurl.endswith('/'):
- siteurl += '/'
+ if not siteurl.endswith("/"):
+ siteurl += "/"
# XXX Put this in a different location.
- if what in {'filename', 'static', 'attach'}:
+ if what in {"filename", "static", "attach"}:
+
def _get_linked_content(key, url):
nonlocal value
def _find_path(path):
- if path.startswith('/'):
+ if path.startswith("/"):
path = path[1:]
else:
# relative to the source path of this content
@@ -287,59 +303,64 @@ class Content:
return result
# check if a static file is linked with {filename}
- if what == 'filename' and key == 'generated_content':
- linked_content = _get_linked_content('static_content', value)
+ if what == "filename" and key == "generated_content":
+ linked_content = _get_linked_content("static_content", value)
if linked_content:
logger.warning(
- '{filename} used for linking to static'
- ' content %s in %s. Use {static} instead',
+ "{filename} used for linking to static"
+ " content %s in %s. Use {static} instead",
value.path,
- self.get_relative_source_path())
+ self.get_relative_source_path(),
+ )
return linked_content
return None
- if what == 'filename':
- key = 'generated_content'
+ if what == "filename":
+ key = "generated_content"
else:
- key = 'static_content'
+ key = "static_content"
linked_content = _get_linked_content(key, value)
if linked_content:
- if what == 'attach':
+ if what == "attach":
linked_content.attach_to(self)
origin = joiner(siteurl, linked_content.url)
- origin = origin.replace('\\', '/') # for Windows paths.
+ origin = origin.replace("\\", "/") # for Windows paths.
else:
logger.warning(
"Unable to find '%s', skipping url replacement.",
- value.geturl(), extra={
- 'limit_msg': ("Other resources were not found "
- "and their urls not replaced")})
- elif what == 'category':
+ value.geturl(),
+ extra={
+ "limit_msg": (
+ "Other resources were not found "
+ "and their urls not replaced"
+ )
+ },
+ )
+ elif what == "category":
origin = joiner(siteurl, Category(path, self.settings).url)
- elif what == 'tag':
+ elif what == "tag":
origin = joiner(siteurl, Tag(path, self.settings).url)
- elif what == 'index':
- origin = joiner(siteurl, self.settings['INDEX_SAVE_AS'])
- elif what == 'author':
+ elif what == "index":
+ origin = joiner(siteurl, self.settings["INDEX_SAVE_AS"])
+ elif what == "author":
origin = joiner(siteurl, Author(path, self.settings).url)
else:
logger.warning(
- "Replacement Indicator '%s' not recognized, "
- "skipping replacement",
- what)
+ "Replacement Indicator '%s' not recognized, " "skipping replacement",
+ what,
+ )
# keep all other parts, such as query, fragment, etc.
parts = list(value)
parts[2] = origin
origin = urlunparse(parts)
- return ''.join((m.group('markup'), m.group('quote'), origin,
- m.group('quote')))
+ return "".join((m.group("markup"), m.group("quote"), origin, m.group("quote")))
def _get_intrasite_link_regex(self):
- intrasite_link_regex = self.settings['INTRASITE_LINK_REGEX']
+ intrasite_link_regex = self.settings["INTRASITE_LINK_REGEX"]
regex = r"""
(?P<markup><[^\>]+ # match tag with all url-value attributes
(?:href|src|poster|data|cite|formaction|action|content)\s*=\s*)
@@ -369,28 +390,28 @@ class Content:
static_links = set()
hrefs = self._get_intrasite_link_regex()
for m in hrefs.finditer(self._content):
- what = m.group('what')
- value = urlparse(m.group('value'))
+ what = m.group("what")
+ value = urlparse(m.group("value"))
path = value.path
- if what not in {'static', 'attach'}:
+ if what not in {"static", "attach"}:
continue
- if path.startswith('/'):
+ if path.startswith("/"):
path = path[1:]
else:
# relative to the source path of this content
path = self.get_relative_source_path(
os.path.join(self.relative_dir, path)
)
- path = path.replace('%20', ' ')
+ path = path.replace("%20", " ")
static_links.add(path)
return static_links
def get_siteurl(self):
- return self._context.get('localsiteurl', '')
+ return self._context.get("localsiteurl", "")
@memoized
def get_content(self, siteurl):
- if hasattr(self, '_get_content'):
+ if hasattr(self, "_get_content"):
content = self._get_content()
else:
content = self._content
@@ -407,15 +428,17 @@ class Content:
This is based on the summary metadata if set, otherwise truncate the
content.
"""
- if 'summary' in self.metadata:
- return self.metadata['summary']
+ if "summary" in self.metadata:
+ return self.metadata["summary"]
- if self.settings['SUMMARY_MAX_LENGTH'] is None:
+ if self.settings["SUMMARY_MAX_LENGTH"] is None:
return self.content
- return truncate_html_words(self.content,
- self.settings['SUMMARY_MAX_LENGTH'],
- self.settings['SUMMARY_END_SUFFIX'])
+ return truncate_html_words(
+ self.content,
+ self.settings["SUMMARY_MAX_LENGTH"],
+ self.settings["SUMMARY_END_SUFFIX"],
+ )
@property
def summary(self):
@@ -424,8 +447,10 @@ class Content:
def _get_summary(self):
"""deprecated function to access summary"""
- logger.warning('_get_summary() has been deprecated since 3.6.4. '
- 'Use the summary decorator instead')
+ logger.warning(
+ "_get_summary() has been deprecated since 3.6.4. "
+ "Use the summary decorator instead"
+ )
return self.summary
@summary.setter
@@ -444,14 +469,14 @@ class Content:
@property
def url(self):
- return self.get_url_setting('url')
+ return self.get_url_setting("url")
@property
def save_as(self):
- return self.get_url_setting('save_as')
+ return self.get_url_setting("save_as")
def _get_template(self):
- if hasattr(self, 'template') and self.template is not None:
+ if hasattr(self, "template") and self.template is not None:
return self.template
else:
return self.default_template
@@ -470,11 +495,10 @@ class Content:
return posixize_path(
os.path.relpath(
- os.path.abspath(os.path.join(
- self.settings['PATH'],
- source_path)),
- os.path.abspath(self.settings['PATH'])
- ))
+ os.path.abspath(os.path.join(self.settings["PATH"], source_path)),
+ os.path.abspath(self.settings["PATH"]),
+ )
+ )
@property
def relative_dir(self):
@@ -482,85 +506,84 @@ class Content:
os.path.dirname(
os.path.relpath(
os.path.abspath(self.source_path),
- os.path.abspath(self.settings['PATH']))))
+ os.path.abspath(self.settings["PATH"]),
+ )
+ )
+ )
def refresh_metadata_intersite_links(self):
- for key in self.settings['FORMATTED_FIELDS']:
- if key in self.metadata and key != 'summary':
- value = self._update_content(
- self.metadata[key],
- self.get_siteurl()
- )
+ for key in self.settings["FORMATTED_FIELDS"]:
+ if key in self.metadata and key != "summary":
+ value = self._update_content(self.metadata[key], self.get_siteurl())
self.metadata[key] = value
setattr(self, key.lower(), value)
# _summary is an internal variable that some plugins may be writing to,
# so ensure changes to it are picked up
- if ('summary' in self.settings['FORMATTED_FIELDS'] and
- 'summary' in self.metadata):
- self._summary = self._update_content(
- self._summary,
- self.get_siteurl()
- )
- self.metadata['summary'] = self._summary
+ if (
+ "summary" in self.settings["FORMATTED_FIELDS"]
+ and "summary" in self.metadata
+ ):
+ self._summary = self._update_content(self._summary, self.get_siteurl())
+ self.metadata["summary"] = self._summary
class Page(Content):
- mandatory_properties = ('title',)
- allowed_statuses = ('published', 'hidden', 'draft')
- default_status = 'published'
- default_template = 'page'
+ mandatory_properties = ("title",)
+ allowed_statuses = ("published", "hidden", "draft")
+ default_status = "published"
+ default_template = "page"
def _expand_settings(self, key):
- klass = 'draft_page' if self.status == 'draft' else None
+ klass = "draft_page" if self.status == "draft" else None
return super()._expand_settings(key, klass)
class Article(Content):
- mandatory_properties = ('title', 'date', 'category')
- allowed_statuses = ('published', 'hidden', 'draft')
- default_status = 'published'
- default_template = 'article'
+ mandatory_properties = ("title", "date", "category")
+ allowed_statuses = ("published", "hidden", "draft")
+ default_status = "published"
+ default_template = "article"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# handle WITH_FUTURE_DATES (designate article to draft based on date)
- if not self.settings['WITH_FUTURE_DATES'] and hasattr(self, 'date'):
+ if not self.settings["WITH_FUTURE_DATES"] and hasattr(self, "date"):
if self.date.tzinfo is None:
now = datetime.datetime.now()
else:
now = datetime.datetime.utcnow().replace(tzinfo=timezone.utc)
if self.date > now:
- self.status = 'draft'
+ self.status = "draft"
# if we are a draft and there is no date provided, set max datetime
- if not hasattr(self, 'date') and self.status == 'draft':
+ if not hasattr(self, "date") and self.status == "draft":
self.date = datetime.datetime.max.replace(tzinfo=self.timezone)
def _expand_settings(self, key):
- klass = 'draft' if self.status == 'draft' else 'article'
+ klass = "draft" if self.status == "draft" else "article"
return super()._expand_settings(key, klass)
class Static(Content):
- mandatory_properties = ('title',)
- default_status = 'published'
+ mandatory_properties = ("title",)
+ default_status = "published"
default_template = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._output_location_referenced = False
- @deprecated_attribute(old='filepath', new='source_path', since=(3, 2, 0))
+ @deprecated_attribute(old="filepath", new="source_path", since=(3, 2, 0))
def filepath():
return None
- @deprecated_attribute(old='src', new='source_path', since=(3, 2, 0))
+ @deprecated_attribute(old="src", new="source_path", since=(3, 2, 0))
def src():
return None
- @deprecated_attribute(old='dst', new='save_as', since=(3, 2, 0))
+ @deprecated_attribute(old="dst", new="save_as", since=(3, 2, 0))
def dst():
return None
@@ -577,8 +600,7 @@ class Static(Content):
return super().save_as
def attach_to(self, content):
- """Override our output directory with that of the given content object.
- """
+ """Override our output directory with that of the given content object."""
# Determine our file's new output path relative to the linking
# document. If it currently lives beneath the linking
@@ -589,8 +611,7 @@ class Static(Content):
tail_path = os.path.relpath(self.source_path, linking_source_dir)
if tail_path.startswith(os.pardir + os.sep):
tail_path = os.path.basename(tail_path)
- new_save_as = os.path.join(
- os.path.dirname(content.save_as), tail_path)
+ new_save_as = os.path.join(os.path.dirname(content.save_as), tail_path)
# We do not build our new url by joining tail_path with the linking
# document's url, because we cannot know just by looking at the latter
@@ -609,12 +630,14 @@ class Static(Content):
"%s because %s. Falling back to "
"{filename} link behavior instead.",
content.get_relative_source_path(),
- self.get_relative_source_path(), reason,
- extra={'limit_msg': "More {attach} warnings silenced."})
+ self.get_relative_source_path(),
+ reason,
+ extra={"limit_msg": "More {attach} warnings silenced."},
+ )
# We never override an override, because we don't want to interfere
# with user-defined overrides that might be in EXTRA_PATH_METADATA.
- if hasattr(self, 'override_save_as') or hasattr(self, 'override_url'):
+ if hasattr(self, "override_save_as") or hasattr(self, "override_url"):
if new_save_as != self.save_as or new_url != self.url:
_log_reason("its output location was already overridden")
return
diff --git a/pelican/generators.py b/pelican/generators.py
index b9063304..0bbb7268 100644
--- a/pelican/generators.py
+++ b/pelican/generators.py
@@ -8,15 +8,27 @@ from functools import partial
from itertools import chain, groupby
from operator import attrgetter
-from jinja2 import (BaseLoader, ChoiceLoader, Environment, FileSystemLoader,
- PrefixLoader, TemplateNotFound)
+from jinja2 import (
+ BaseLoader,
+ ChoiceLoader,
+ Environment,
+ FileSystemLoader,
+ PrefixLoader,
+ TemplateNotFound,
+)
from pelican.cache import FileStampDataCacher
from pelican.contents import Article, Page, Static
from pelican.plugins import signals
from pelican.readers import Readers
-from pelican.utils import (DateFormatter, copy, mkdir_p, order_content,
- posixize_path, process_translations)
+from pelican.utils import (
+ DateFormatter,
+ copy,
+ mkdir_p,
+ order_content,
+ posixize_path,
+ process_translations,
+)
logger = logging.getLogger(__name__)
@@ -28,8 +40,16 @@ class PelicanTemplateNotFound(Exception):
class Generator:
"""Baseclass generator"""
- def __init__(self, context, settings, path, theme, output_path,
- readers_cache_name='', **kwargs):
+ def __init__(
+ self,
+ context,
+ settings,
+ path,
+ theme,
+ output_path,
+ readers_cache_name="",
+ **kwargs,
+ ):
self.context = context
self.settings = settings
self.path = path
@@ -43,44 +63,45 @@ class Generator:
# templates cache
self._templates = {}
- self._templates_path = list(self.settings['THEME_TEMPLATES_OVERRIDES'])
+ self._templates_path = list(self.settings["THEME_TEMPLATES_OVERRIDES"])
- theme_templates_path = os.path.expanduser(
- os.path.join(self.theme, 'templates'))
+ theme_templates_path = os.path.expanduser(os.path.join(self.theme, "templates"))
self._templates_path.append(theme_templates_path)
theme_loader = FileSystemLoader(theme_templates_path)
simple_theme_path = os.path.dirname(os.path.abspath(__file__))
simple_loader = FileSystemLoader(
- os.path.join(simple_theme_path, "themes", "simple", "templates"))
-
- self.env = Environment(
- loader=ChoiceLoader([
- FileSystemLoader(self._templates_path),
- simple_loader, # implicit inheritance
- PrefixLoader({
- '!simple': simple_loader,
- '!theme': theme_loader
- }) # explicit ones
- ]),
- **self.settings['JINJA_ENVIRONMENT']
+ os.path.join(simple_theme_path, "themes", "simple", "templates")
)
- logger.debug('Template list: %s', self.env.list_templates())
+ self.env = Environment(
+ loader=ChoiceLoader(
+ [
+ FileSystemLoader(self._templates_path),
+ simple_loader, # implicit inheritance
+ PrefixLoader(
+ {"!simple": simple_loader, "!theme": theme_loader}
+ ), # explicit ones
+ ]
+ ),
+ **self.settings["JINJA_ENVIRONMENT"],
+ )
+
+ logger.debug("Template list: %s", self.env.list_templates())
# provide utils.strftime as a jinja filter
- self.env.filters.update({'strftime': DateFormatter()})
+ self.env.filters.update({"strftime": DateFormatter()})
# get custom Jinja filters from user settings
- custom_filters = self.settings['JINJA_FILTERS']
+ custom_filters = self.settings["JINJA_FILTERS"]
self.env.filters.update(custom_filters)
# get custom Jinja globals from user settings
- custom_globals = self.settings['JINJA_GLOBALS']
+ custom_globals = self.settings["JINJA_GLOBALS"]
self.env.globals.update(custom_globals)
# get custom Jinja tests from user settings
- custom_tests = self.settings['JINJA_TESTS']
+ custom_tests = self.settings["JINJA_TESTS"]
self.env.tests.update(custom_tests)
signals.generator_init.send(self)
@@ -91,7 +112,7 @@ class Generator:
templates ready to use with Jinja2.
"""
if name not in self._templates:
- for ext in self.settings['TEMPLATE_EXTENSIONS']:
+ for ext in self.settings["TEMPLATE_EXTENSIONS"]:
try:
self._templates[name] = self.env.get_template(name + ext)
break
@@ -100,9 +121,12 @@ class Generator:
if name not in self._templates:
raise PelicanTemplateNotFound(
- '[templates] unable to load {}[{}] from {}'.format(
- name, ', '.join(self.settings['TEMPLATE_EXTENSIONS']),
- self._templates_path))
+ "[templates] unable to load {}[{}] from {}".format(
+ name,
+ ", ".join(self.settings["TEMPLATE_EXTENSIONS"]),
+ self._templates_path,
+ )
+ )
return self._templates[name]
@@ -118,7 +142,7 @@ class Generator:
basename = os.path.basename(path)
# check IGNORE_FILES
- ignores = self.settings['IGNORE_FILES']
+ ignores = self.settings["IGNORE_FILES"]
if any(fnmatch.fnmatch(basename, ignore) for ignore in ignores):
return False
@@ -147,20 +171,21 @@ class Generator:
exclusions_by_dirpath.setdefault(parent_path, set()).add(subdir)
files = set()
- ignores = self.settings['IGNORE_FILES']
+ ignores = self.settings["IGNORE_FILES"]
for path in paths:
# careful: os.path.join() will add a slash when path == ''.
root = os.path.join(self.path, path) if path else self.path
if os.path.isdir(root):
for dirpath, dirs, temp_files in os.walk(
- root, topdown=True, followlinks=True):
+ root, topdown=True, followlinks=True
+ ):
excl = exclusions_by_dirpath.get(dirpath, ())
# We copy the `dirs` list as we will modify it in the loop:
for d in list(dirs):
- if (d in excl or
- any(fnmatch.fnmatch(d, ignore)
- for ignore in ignores)):
+ if d in excl or any(
+ fnmatch.fnmatch(d, ignore) for ignore in ignores
+ ):
if d in dirs:
dirs.remove(d)
@@ -178,7 +203,7 @@ class Generator:
Store a reference to its Content object, for url lookups later.
"""
location = content.get_relative_source_path()
- key = 'static_content' if static else 'generated_content'
+ key = "static_content" if static else "generated_content"
self.context[key][location] = content
def _add_failed_source_path(self, path, static=False):
@@ -186,7 +211,7 @@ class Generator:
(For example, one that was missing mandatory metadata.)
The path argument is expected to be relative to self.path.
"""
- key = 'static_content' if static else 'generated_content'
+ key = "static_content" if static else "generated_content"
self.context[key][posixize_path(os.path.normpath(path))] = None
def _is_potential_source_path(self, path, static=False):
@@ -195,14 +220,14 @@ class Generator:
before this method is called, even if they failed to process.)
The path argument is expected to be relative to self.path.
"""
- key = 'static_content' if static else 'generated_content'
- return (posixize_path(os.path.normpath(path)) in self.context[key])
+ key = "static_content" if static else "generated_content"
+ return posixize_path(os.path.normpath(path)) in self.context[key]
def add_static_links(self, content):
"""Add file links in content to context to be processed as Static
content.
"""
- self.context['static_links'] |= content.get_static_links()
+ self.context["static_links"] |= content.get_static_links()
def _update_context(self, items):
"""Update the context with the given items from the current processor.
@@ -211,7 +236,7 @@ class Generator:
"""
for item in items:
value = getattr(self, item)
- if hasattr(value, 'items'):
+ if hasattr(value, "items"):
value = list(value.items()) # py3k safeguard for iterators
self.context[item] = value
@@ -221,37 +246,35 @@ class Generator:
class CachingGenerator(Generator, FileStampDataCacher):
- '''Subclass of Generator and FileStampDataCacher classes
+ """Subclass of Generator and FileStampDataCacher classes
enables content caching, either at the generator or reader level
- '''
+ """
def __init__(self, *args, **kwargs):
- '''Initialize the generator, then set up caching
+ """Initialize the generator, then set up caching
note the multiple inheritance structure
- '''
+ """
cls_name = self.__class__.__name__
- Generator.__init__(self, *args,
- readers_cache_name=(cls_name + '-Readers'),
- **kwargs)
+ Generator.__init__(
+ self, *args, readers_cache_name=(cls_name + "-Readers"), **kwargs
+ )
- cache_this_level = \
- self.settings['CONTENT_CACHING_LAYER'] == 'generator'
- caching_policy = cache_this_level and self.settings['CACHE_CONTENT']
- load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE']
- FileStampDataCacher.__init__(self, self.settings, cls_name,
- caching_policy, load_policy
- )
+ cache_this_level = self.settings["CONTENT_CACHING_LAYER"] == "generator"
+ caching_policy = cache_this_level and self.settings["CACHE_CONTENT"]
+ load_policy = cache_this_level and self.settings["LOAD_CONTENT_CACHE"]
+ FileStampDataCacher.__init__(
+ self, self.settings, cls_name, caching_policy, load_policy
+ )
def _get_file_stamp(self, filename):
- '''Get filestamp for path relative to generator.path'''
+ """Get filestamp for path relative to generator.path"""
filename = os.path.join(self.path, filename)
return super()._get_file_stamp(filename)
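
A note on the caching split this hunk reformats: `CONTENT_CACHING_LAYER` picks exactly one layer to persist parsed content, and both policy flags derive from it, so a generator-level cache and a reader-level cache are never active at once. A minimal sketch of that derivation (the `settings` dict is illustrative, not from the patch):

    settings = {
        "CONTENT_CACHING_LAYER": "generator",  # or "reader"
        "CACHE_CONTENT": True,
        "LOAD_CONTENT_CACHE": True,
    }

    cache_this_level = settings["CONTENT_CACHING_LAYER"] == "generator"
    caching_policy = cache_this_level and settings["CACHE_CONTENT"]
    load_policy = cache_this_level and settings["LOAD_CONTENT_CACHE"]
    print(caching_policy, load_policy)  # True True; both False for "reader"
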
class _FileLoader(BaseLoader):
-
def __init__(self, path, basedir):
self.path = path
self.fullpath = os.path.join(basedir, path)
@@ -260,22 +283,21 @@ class _FileLoader(BaseLoader):
if template != self.path or not os.path.exists(self.fullpath):
raise TemplateNotFound(template)
mtime = os.path.getmtime(self.fullpath)
- with open(self.fullpath, encoding='utf-8') as f:
+ with open(self.fullpath, encoding="utf-8") as f:
source = f.read()
- return (source, self.fullpath,
- lambda: mtime == os.path.getmtime(self.fullpath))
+ return (source, self.fullpath, lambda: mtime == os.path.getmtime(self.fullpath))
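
`_FileLoader.get_source` implements Jinja2's `BaseLoader` contract: return a `(source, filename, uptodate)` triple, where `uptodate` is a zero-argument callable the environment invokes to decide whether a cached compile is still fresh. A self-contained sketch of the same contract with an in-memory mapping (class and names invented for illustration):

    from jinja2 import BaseLoader, Environment, TemplateNotFound

    class MappingLoader(BaseLoader):
        # toy loader serving templates from a dict
        def __init__(self, mapping):
            self.mapping = mapping

        def get_source(self, environment, template):
            if template not in self.mapping:
                raise TemplateNotFound(template)
            # filename may be None for non-file sources; the lambda plays
            # the role of the mtime check in _FileLoader above
            return self.mapping[template], None, lambda: True

    env = Environment(loader=MappingLoader({"hello.html": "Hi {{ name }}!"}))
    print(env.get_template("hello.html").render(name="Pelican"))  # Hi Pelican!
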
class TemplatePagesGenerator(Generator):
-
def generate_output(self, writer):
- for source, dest in self.settings['TEMPLATE_PAGES'].items():
+ for source, dest in self.settings["TEMPLATE_PAGES"].items():
self.env.loader.loaders.insert(0, _FileLoader(source, self.path))
try:
template = self.env.get_template(source)
- rurls = self.settings['RELATIVE_URLS']
- writer.write_file(dest, template, self.context, rurls,
- override_output=True, url='')
+ rurls = self.settings["RELATIVE_URLS"]
+ writer.write_file(
+ dest, template, self.context, rurls, override_output=True, url=""
+ )
finally:
del self.env.loader.loaders[0]
@@ -286,13 +308,13 @@ class ArticlesGenerator(CachingGenerator):
def __init__(self, *args, **kwargs):
"""initialize properties"""
# Published, listed articles
- self.articles = [] # only articles in default language
+ self.articles = [] # only articles in default language
self.translations = []
# Published, unlisted articles
self.hidden_articles = []
self.hidden_translations = []
# Draft articles
- self.drafts = [] # only drafts in default language
+ self.drafts = [] # only drafts in default language
self.drafts_translations = []
self.dates = {}
self.period_archives = defaultdict(list)
@@ -306,263 +328,304 @@ class ArticlesGenerator(CachingGenerator):
def generate_feeds(self, writer):
"""Generate the feeds from the current context, and output files."""
- if self.settings.get('FEED_ATOM'):
+ if self.settings.get("FEED_ATOM"):
writer.write_feed(
self.articles,
self.context,
- self.settings['FEED_ATOM'],
- self.settings.get('FEED_ATOM_URL', self.settings['FEED_ATOM'])
- )
+ self.settings["FEED_ATOM"],
+ self.settings.get("FEED_ATOM_URL", self.settings["FEED_ATOM"]),
+ )
- if self.settings.get('FEED_RSS'):
+ if self.settings.get("FEED_RSS"):
writer.write_feed(
self.articles,
self.context,
- self.settings['FEED_RSS'],
- self.settings.get('FEED_RSS_URL', self.settings['FEED_RSS']),
- feed_type='rss'
- )
+ self.settings["FEED_RSS"],
+ self.settings.get("FEED_RSS_URL", self.settings["FEED_RSS"]),
+ feed_type="rss",
+ )
- if (self.settings.get('FEED_ALL_ATOM') or
- self.settings.get('FEED_ALL_RSS')):
+ if self.settings.get("FEED_ALL_ATOM") or self.settings.get("FEED_ALL_RSS"):
all_articles = list(self.articles)
for article in self.articles:
all_articles.extend(article.translations)
- order_content(
- all_articles, order_by=self.settings['ARTICLE_ORDER_BY']
- )
+ order_content(all_articles, order_by=self.settings["ARTICLE_ORDER_BY"])
- if self.settings.get('FEED_ALL_ATOM'):
+ if self.settings.get("FEED_ALL_ATOM"):
writer.write_feed(
all_articles,
self.context,
- self.settings['FEED_ALL_ATOM'],
- self.settings.get('FEED_ALL_ATOM_URL',
- self.settings['FEED_ALL_ATOM'])
- )
+ self.settings["FEED_ALL_ATOM"],
+ self.settings.get(
+ "FEED_ALL_ATOM_URL", self.settings["FEED_ALL_ATOM"]
+ ),
+ )
- if self.settings.get('FEED_ALL_RSS'):
+ if self.settings.get("FEED_ALL_RSS"):
writer.write_feed(
all_articles,
self.context,
- self.settings['FEED_ALL_RSS'],
- self.settings.get('FEED_ALL_RSS_URL',
- self.settings['FEED_ALL_RSS']),
- feed_type='rss'
- )
+ self.settings["FEED_ALL_RSS"],
+ self.settings.get(
+ "FEED_ALL_RSS_URL", self.settings["FEED_ALL_RSS"]
+ ),
+ feed_type="rss",
+ )
for cat, arts in self.categories:
- if self.settings.get('CATEGORY_FEED_ATOM'):
+ if self.settings.get("CATEGORY_FEED_ATOM"):
writer.write_feed(
arts,
self.context,
- str(self.settings['CATEGORY_FEED_ATOM']).format(slug=cat.slug),
+ str(self.settings["CATEGORY_FEED_ATOM"]).format(slug=cat.slug),
self.settings.get(
- 'CATEGORY_FEED_ATOM_URL',
- str(self.settings['CATEGORY_FEED_ATOM']).format(
- slug=cat.slug
- )),
- feed_title=cat.name
- )
-
- if self.settings.get('CATEGORY_FEED_RSS'):
- writer.write_feed(
- arts,
- self.context,
- str(self.settings['CATEGORY_FEED_RSS']).format(slug=cat.slug),
- self.settings.get(
- 'CATEGORY_FEED_RSS_URL',
- str(self.settings['CATEGORY_FEED_RSS']).format(
- slug=cat.slug
- )),
+ "CATEGORY_FEED_ATOM_URL",
+ str(self.settings["CATEGORY_FEED_ATOM"]).format(slug=cat.slug),
+ ),
feed_title=cat.name,
- feed_type='rss'
- )
+ )
+
+ if self.settings.get("CATEGORY_FEED_RSS"):
+ writer.write_feed(
+ arts,
+ self.context,
+ str(self.settings["CATEGORY_FEED_RSS"]).format(slug=cat.slug),
+ self.settings.get(
+ "CATEGORY_FEED_RSS_URL",
+ str(self.settings["CATEGORY_FEED_RSS"]).format(slug=cat.slug),
+ ),
+ feed_title=cat.name,
+ feed_type="rss",
+ )
for auth, arts in self.authors:
- if self.settings.get('AUTHOR_FEED_ATOM'):
+ if self.settings.get("AUTHOR_FEED_ATOM"):
writer.write_feed(
arts,
self.context,
- str(self.settings['AUTHOR_FEED_ATOM']).format(slug=auth.slug),
+ str(self.settings["AUTHOR_FEED_ATOM"]).format(slug=auth.slug),
self.settings.get(
- 'AUTHOR_FEED_ATOM_URL',
- str(self.settings['AUTHOR_FEED_ATOM']).format(
- slug=auth.slug
- )),
- feed_title=auth.name
- )
-
- if self.settings.get('AUTHOR_FEED_RSS'):
- writer.write_feed(
- arts,
- self.context,
- str(self.settings['AUTHOR_FEED_RSS']).format(slug=auth.slug),
- self.settings.get(
- 'AUTHOR_FEED_RSS_URL',
- str(self.settings['AUTHOR_FEED_RSS']).format(
- slug=auth.slug
- )),
+ "AUTHOR_FEED_ATOM_URL",
+ str(self.settings["AUTHOR_FEED_ATOM"]).format(slug=auth.slug),
+ ),
feed_title=auth.name,
- feed_type='rss'
+ )
+
+ if self.settings.get("AUTHOR_FEED_RSS"):
+ writer.write_feed(
+ arts,
+ self.context,
+ str(self.settings["AUTHOR_FEED_RSS"]).format(slug=auth.slug),
+ self.settings.get(
+ "AUTHOR_FEED_RSS_URL",
+ str(self.settings["AUTHOR_FEED_RSS"]).format(slug=auth.slug),
+ ),
+ feed_title=auth.name,
+ feed_type="rss",
+ )
+
+ if self.settings.get("TAG_FEED_ATOM") or self.settings.get("TAG_FEED_RSS"):
+ for tag, arts in self.tags.items():
+ if self.settings.get("TAG_FEED_ATOM"):
+ writer.write_feed(
+ arts,
+ self.context,
+ str(self.settings["TAG_FEED_ATOM"]).format(slug=tag.slug),
+ self.settings.get(
+ "TAG_FEED_ATOM_URL",
+ str(self.settings["TAG_FEED_ATOM"]).format(slug=tag.slug),
+ ),
+ feed_title=tag.name,
)
- if (self.settings.get('TAG_FEED_ATOM') or
- self.settings.get('TAG_FEED_RSS')):
- for tag, arts in self.tags.items():
- if self.settings.get('TAG_FEED_ATOM'):
+ if self.settings.get("TAG_FEED_RSS"):
writer.write_feed(
arts,
self.context,
- str(self.settings['TAG_FEED_ATOM']).format(slug=tag.slug),
+ str(self.settings["TAG_FEED_RSS"]).format(slug=tag.slug),
self.settings.get(
- 'TAG_FEED_ATOM_URL',
- str(self.settings['TAG_FEED_ATOM']).format(
- slug=tag.slug
- )),
- feed_title=tag.name
- )
-
- if self.settings.get('TAG_FEED_RSS'):
- writer.write_feed(
- arts,
- self.context,
- str(self.settings['TAG_FEED_RSS']).format(slug=tag.slug),
- self.settings.get(
- 'TAG_FEED_RSS_URL',
- str(self.settings['TAG_FEED_RSS']).format(
- slug=tag.slug
- )),
+ "TAG_FEED_RSS_URL",
+ str(self.settings["TAG_FEED_RSS"]).format(slug=tag.slug),
+ ),
feed_title=tag.name,
- feed_type='rss'
- )
+ feed_type="rss",
+ )
- if (self.settings.get('TRANSLATION_FEED_ATOM') or
- self.settings.get('TRANSLATION_FEED_RSS')):
+ if self.settings.get("TRANSLATION_FEED_ATOM") or self.settings.get(
+ "TRANSLATION_FEED_RSS"
+ ):
translations_feeds = defaultdict(list)
for article in chain(self.articles, self.translations):
translations_feeds[article.lang].append(article)
for lang, items in translations_feeds.items():
- items = order_content(
- items, order_by=self.settings['ARTICLE_ORDER_BY'])
- if self.settings.get('TRANSLATION_FEED_ATOM'):
+ items = order_content(items, order_by=self.settings["ARTICLE_ORDER_BY"])
+ if self.settings.get("TRANSLATION_FEED_ATOM"):
writer.write_feed(
items,
self.context,
- str(
- self.settings['TRANSLATION_FEED_ATOM']
- ).format(lang=lang),
+ str(self.settings["TRANSLATION_FEED_ATOM"]).format(lang=lang),
self.settings.get(
- 'TRANSLATION_FEED_ATOM_URL',
- str(
- self.settings['TRANSLATION_FEED_ATOM']
- ).format(lang=lang),
- )
- )
- if self.settings.get('TRANSLATION_FEED_RSS'):
- writer.write_feed(
- items,
- self.context,
- str(
- self.settings['TRANSLATION_FEED_RSS']
- ).format(lang=lang),
- self.settings.get(
- 'TRANSLATION_FEED_RSS_URL',
- str(self.settings['TRANSLATION_FEED_RSS'])).format(
+ "TRANSLATION_FEED_ATOM_URL",
+ str(self.settings["TRANSLATION_FEED_ATOM"]).format(
lang=lang
),
- feed_type='rss'
+ ),
+ )
+ if self.settings.get("TRANSLATION_FEED_RSS"):
+ writer.write_feed(
+ items,
+ self.context,
+ str(self.settings["TRANSLATION_FEED_RSS"]).format(lang=lang),
+ self.settings.get(
+ "TRANSLATION_FEED_RSS_URL",
+ str(self.settings["TRANSLATION_FEED_RSS"]),
+ ).format(lang=lang),
+ feed_type="rss",
)
def generate_articles(self, write):
"""Generate the articles."""
for article in chain(
- self.translations, self.articles,
- self.hidden_translations, self.hidden_articles
+ self.translations,
+ self.articles,
+ self.hidden_translations,
+ self.hidden_articles,
):
signals.article_generator_write_article.send(self, content=article)
- write(article.save_as, self.get_template(article.template),
- self.context, article=article, category=article.category,
- override_output=hasattr(article, 'override_save_as'),
- url=article.url, blog=True)
+ write(
+ article.save_as,
+ self.get_template(article.template),
+ self.context,
+ article=article,
+ category=article.category,
+ override_output=hasattr(article, "override_save_as"),
+ url=article.url,
+ blog=True,
+ )
def generate_period_archives(self, write):
"""Generate per-year, per-month, and per-day archives."""
try:
- template = self.get_template('period_archives')
+ template = self.get_template("period_archives")
except PelicanTemplateNotFound:
- template = self.get_template('archives')
+ template = self.get_template("archives")
for granularity in self.period_archives:
for period in self.period_archives[granularity]:
-
context = self.context.copy()
- context['period'] = period['period']
- context['period_num'] = period['period_num']
+ context["period"] = period["period"]
+ context["period_num"] = period["period_num"]
- write(period['save_as'], template, context,
- articles=period['articles'], dates=period['dates'],
- template_name='period_archives', blog=True,
- url=period['url'], all_articles=self.articles)
+ write(
+ period["save_as"],
+ template,
+ context,
+ articles=period["articles"],
+ dates=period["dates"],
+ template_name="period_archives",
+ blog=True,
+ url=period["url"],
+ all_articles=self.articles,
+ )
def generate_direct_templates(self, write):
"""Generate direct templates pages"""
- for template in self.settings['DIRECT_TEMPLATES']:
- save_as = self.settings.get("%s_SAVE_AS" % template.upper(),
- '%s.html' % template)
- url = self.settings.get("%s_URL" % template.upper(),
- '%s.html' % template)
+ for template in self.settings["DIRECT_TEMPLATES"]:
+ save_as = self.settings.get(
+ "%s_SAVE_AS" % template.upper(), "%s.html" % template
+ )
+ url = self.settings.get("%s_URL" % template.upper(), "%s.html" % template)
if not save_as:
continue
- write(save_as, self.get_template(template), self.context,
- articles=self.articles, dates=self.dates, blog=True,
- template_name=template,
- page_name=os.path.splitext(save_as)[0], url=url)
+ write(
+ save_as,
+ self.get_template(template),
+ self.context,
+ articles=self.articles,
+ dates=self.dates,
+ blog=True,
+ template_name=template,
+ page_name=os.path.splitext(save_as)[0],
+ url=url,
+ )
def generate_tags(self, write):
"""Generate Tags pages."""
- tag_template = self.get_template('tag')
+ tag_template = self.get_template("tag")
for tag, articles in self.tags.items():
dates = [article for article in self.dates if article in articles]
- write(tag.save_as, tag_template, self.context, tag=tag,
- url=tag.url, articles=articles, dates=dates,
- template_name='tag', blog=True, page_name=tag.page_name,
- all_articles=self.articles)
+ write(
+ tag.save_as,
+ tag_template,
+ self.context,
+ tag=tag,
+ url=tag.url,
+ articles=articles,
+ dates=dates,
+ template_name="tag",
+ blog=True,
+ page_name=tag.page_name,
+ all_articles=self.articles,
+ )
def generate_categories(self, write):
"""Generate category pages."""
- category_template = self.get_template('category')
+ category_template = self.get_template("category")
for cat, articles in self.categories:
dates = [article for article in self.dates if article in articles]
- write(cat.save_as, category_template, self.context, url=cat.url,
- category=cat, articles=articles, dates=dates,
- template_name='category', blog=True, page_name=cat.page_name,
- all_articles=self.articles)
+ write(
+ cat.save_as,
+ category_template,
+ self.context,
+ url=cat.url,
+ category=cat,
+ articles=articles,
+ dates=dates,
+ template_name="category",
+ blog=True,
+ page_name=cat.page_name,
+ all_articles=self.articles,
+ )
def generate_authors(self, write):
"""Generate Author pages."""
- author_template = self.get_template('author')
+ author_template = self.get_template("author")
for aut, articles in self.authors:
dates = [article for article in self.dates if article in articles]
- write(aut.save_as, author_template, self.context,
- url=aut.url, author=aut, articles=articles, dates=dates,
- template_name='author', blog=True,
- page_name=aut.page_name, all_articles=self.articles)
+ write(
+ aut.save_as,
+ author_template,
+ self.context,
+ url=aut.url,
+ author=aut,
+ articles=articles,
+ dates=dates,
+ template_name="author",
+ blog=True,
+ page_name=aut.page_name,
+ all_articles=self.articles,
+ )
def generate_drafts(self, write):
"""Generate drafts pages."""
for draft in chain(self.drafts_translations, self.drafts):
- write(draft.save_as, self.get_template(draft.template),
- self.context, article=draft, category=draft.category,
- override_output=hasattr(draft, 'override_save_as'),
- blog=True, all_articles=self.articles, url=draft.url)
+ write(
+ draft.save_as,
+ self.get_template(draft.template),
+ self.context,
+ article=draft,
+ category=draft.category,
+ override_output=hasattr(draft, "override_save_as"),
+ blog=True,
+ all_articles=self.articles,
+ url=draft.url,
+ )
def generate_pages(self, writer):
"""Generate the pages on the disk"""
- write = partial(writer.write_file,
- relative_urls=self.settings['RELATIVE_URLS'])
+ write = partial(writer.write_file, relative_urls=self.settings["RELATIVE_URLS"])
# to minimize the number of relative path stuff modification
# in writer, articles pass first
@@ -583,22 +646,28 @@ class ArticlesGenerator(CachingGenerator):
all_drafts = []
hidden_articles = []
for f in self.get_files(
- self.settings['ARTICLE_PATHS'],
- exclude=self.settings['ARTICLE_EXCLUDES']):
+ self.settings["ARTICLE_PATHS"], exclude=self.settings["ARTICLE_EXCLUDES"]
+ ):
article = self.get_cached_data(f, None)
if article is None:
try:
article = self.readers.read_file(
- base_path=self.path, path=f, content_class=Article,
+ base_path=self.path,
+ path=f,
+ content_class=Article,
context=self.context,
preread_signal=signals.article_generator_preread,
preread_sender=self,
context_signal=signals.article_generator_context,
- context_sender=self)
+ context_sender=self,
+ )
except Exception as e:
logger.error(
- 'Could not process %s\n%s', f, e,
- exc_info=self.settings.get('DEBUG', False))
+ "Could not process %s\n%s",
+ f,
+ e,
+ exc_info=self.settings.get("DEBUG", False),
+ )
self._add_failed_source_path(f)
continue
@@ -620,8 +689,9 @@ class ArticlesGenerator(CachingGenerator):
def _process(arts):
origs, translations = process_translations(
- arts, translation_id=self.settings['ARTICLE_TRANSLATION_ID'])
- origs = order_content(origs, self.settings['ARTICLE_ORDER_BY'])
+ arts, translation_id=self.settings["ARTICLE_TRANSLATION_ID"]
+ )
+ origs = order_content(origs, self.settings["ARTICLE_ORDER_BY"])
return origs, translations
self.articles, self.translations = _process(all_articles)
@@ -634,36 +704,45 @@ class ArticlesGenerator(CachingGenerator):
# only main articles are listed in categories and tags
# not translations or hidden articles
self.categories[article.category].append(article)
- if hasattr(article, 'tags'):
+ if hasattr(article, "tags"):
for tag in article.tags:
self.tags[tag].append(article)
- for author in getattr(article, 'authors', []):
+ for author in getattr(article, "authors", []):
self.authors[author].append(article)
self.dates = list(self.articles)
- self.dates.sort(key=attrgetter('date'),
- reverse=self.context['NEWEST_FIRST_ARCHIVES'])
+ self.dates.sort(
+ key=attrgetter("date"), reverse=self.context["NEWEST_FIRST_ARCHIVES"]
+ )
self.period_archives = self._build_period_archives(
- self.dates, self.articles, self.settings)
+ self.dates, self.articles, self.settings
+ )
# and generate the output :)
# order the categories per name
self.categories = list(self.categories.items())
- self.categories.sort(
- reverse=self.settings['REVERSE_CATEGORY_ORDER'])
+ self.categories.sort(reverse=self.settings["REVERSE_CATEGORY_ORDER"])
self.authors = list(self.authors.items())
self.authors.sort()
- self._update_context((
- 'articles', 'drafts', 'hidden_articles',
- 'dates', 'tags', 'categories',
- 'authors', 'related_posts'))
+ self._update_context(
+ (
+ "articles",
+ "drafts",
+ "hidden_articles",
+ "dates",
+ "tags",
+ "categories",
+ "authors",
+ "related_posts",
+ )
+ )
# _update_context flattens dicts, which should not happen to
# period_archives, so we update the context directly for it:
- self.context['period_archives'] = self.period_archives
+ self.context["period_archives"] = self.period_archives
self.save_cache()
self.readers.save_cache()
signals.article_generator_finalized.send(self)
@@ -677,29 +756,29 @@ class ArticlesGenerator(CachingGenerator):
period_archives = defaultdict(list)
period_archives_settings = {
- 'year': {
- 'save_as': settings['YEAR_ARCHIVE_SAVE_AS'],
- 'url': settings['YEAR_ARCHIVE_URL'],
+ "year": {
+ "save_as": settings["YEAR_ARCHIVE_SAVE_AS"],
+ "url": settings["YEAR_ARCHIVE_URL"],
},
- 'month': {
- 'save_as': settings['MONTH_ARCHIVE_SAVE_AS'],
- 'url': settings['MONTH_ARCHIVE_URL'],
+ "month": {
+ "save_as": settings["MONTH_ARCHIVE_SAVE_AS"],
+ "url": settings["MONTH_ARCHIVE_URL"],
},
- 'day': {
- 'save_as': settings['DAY_ARCHIVE_SAVE_AS'],
- 'url': settings['DAY_ARCHIVE_URL'],
+ "day": {
+ "save_as": settings["DAY_ARCHIVE_SAVE_AS"],
+ "url": settings["DAY_ARCHIVE_URL"],
},
}
granularity_key_func = {
- 'year': attrgetter('date.year'),
- 'month': attrgetter('date.year', 'date.month'),
- 'day': attrgetter('date.year', 'date.month', 'date.day'),
+ "year": attrgetter("date.year"),
+ "month": attrgetter("date.year", "date.month"),
+ "day": attrgetter("date.year", "date.month", "date.day"),
}
- for granularity in 'year', 'month', 'day':
- save_as_fmt = period_archives_settings[granularity]['save_as']
- url_fmt = period_archives_settings[granularity]['url']
+ for granularity in "year", "month", "day":
+ save_as_fmt = period_archives_settings[granularity]["save_as"]
+ url_fmt = period_archives_settings[granularity]["url"]
key_func = granularity_key_func[granularity]
if not save_as_fmt:
@@ -710,26 +789,26 @@ class ArticlesGenerator(CachingGenerator):
archive = {}
dates = list(group)
- archive['dates'] = dates
- archive['articles'] = [a for a in articles if a in dates]
+ archive["dates"] = dates
+ archive["articles"] = [a for a in articles if a in dates]
# use the first date to specify the period archive URL
# and save_as; the specific date used does not matter as
# they all belong to the same period
d = dates[0].date
- archive['save_as'] = save_as_fmt.format(date=d)
- archive['url'] = url_fmt.format(date=d)
+ archive["save_as"] = save_as_fmt.format(date=d)
+ archive["url"] = url_fmt.format(date=d)
- if granularity == 'year':
- archive['period'] = (period,)
- archive['period_num'] = (period,)
+ if granularity == "year":
+ archive["period"] = (period,)
+ archive["period_num"] = (period,)
else:
month_name = calendar.month_name[period[1]]
- if granularity == 'month':
- archive['period'] = (period[0], month_name)
+ if granularity == "month":
+ archive["period"] = (period[0], month_name)
else:
- archive['period'] = (period[0], month_name, period[2])
- archive['period_num'] = tuple(period)
+ archive["period"] = (period[0], month_name, period[2])
+ archive["period_num"] = tuple(period)
period_archives[granularity].append(archive)
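
The granularity keys above lean on `operator.attrgetter` accepting dotted paths and multiple attributes (returning a tuple), which turns per-period bucketing into a plain sorted `groupby`. A small stdlib illustration (sample posts invented):

    from datetime import date
    from itertools import groupby
    from operator import attrgetter
    from types import SimpleNamespace

    posts = [SimpleNamespace(title=t, date=d) for t, d in [
        ("a", date(2023, 1, 5)), ("b", date(2023, 1, 20)), ("c", date(2023, 3, 1)),
    ]]
    key = attrgetter("date.year", "date.month")  # dotted lookup, tuple key
    for period, group in groupby(sorted(posts, key=key), key):
        print(period, [p.title for p in group])
    # (2023, 1) ['a', 'b']
    # (2023, 3) ['c']
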
@@ -741,13 +820,15 @@ class ArticlesGenerator(CachingGenerator):
signals.article_writer_finalized.send(self, writer=writer)
def refresh_metadata_intersite_links(self):
- for e in chain(self.articles,
- self.translations,
- self.drafts,
- self.drafts_translations,
- self.hidden_articles,
- self.hidden_translations):
- if hasattr(e, 'refresh_metadata_intersite_links'):
+ for e in chain(
+ self.articles,
+ self.translations,
+ self.drafts,
+ self.drafts_translations,
+ self.hidden_articles,
+ self.hidden_translations,
+ ):
+ if hasattr(e, "refresh_metadata_intersite_links"):
e.refresh_metadata_intersite_links()
@@ -769,22 +850,28 @@ class PagesGenerator(CachingGenerator):
hidden_pages = []
draft_pages = []
for f in self.get_files(
- self.settings['PAGE_PATHS'],
- exclude=self.settings['PAGE_EXCLUDES']):
+ self.settings["PAGE_PATHS"], exclude=self.settings["PAGE_EXCLUDES"]
+ ):
page = self.get_cached_data(f, None)
if page is None:
try:
page = self.readers.read_file(
- base_path=self.path, path=f, content_class=Page,
+ base_path=self.path,
+ path=f,
+ content_class=Page,
context=self.context,
preread_signal=signals.page_generator_preread,
preread_sender=self,
context_signal=signals.page_generator_context,
- context_sender=self)
+ context_sender=self,
+ )
except Exception as e:
logger.error(
- 'Could not process %s\n%s', f, e,
- exc_info=self.settings.get('DEBUG', False))
+ "Could not process %s\n%s",
+ f,
+ e,
+ exc_info=self.settings.get("DEBUG", False),
+ )
self._add_failed_source_path(f)
continue
@@ -805,40 +892,51 @@ class PagesGenerator(CachingGenerator):
def _process(pages):
origs, translations = process_translations(
- pages, translation_id=self.settings['PAGE_TRANSLATION_ID'])
- origs = order_content(origs, self.settings['PAGE_ORDER_BY'])
+ pages, translation_id=self.settings["PAGE_TRANSLATION_ID"]
+ )
+ origs = order_content(origs, self.settings["PAGE_ORDER_BY"])
return origs, translations
self.pages, self.translations = _process(all_pages)
self.hidden_pages, self.hidden_translations = _process(hidden_pages)
self.draft_pages, self.draft_translations = _process(draft_pages)
- self._update_context(('pages', 'hidden_pages', 'draft_pages'))
+ self._update_context(("pages", "hidden_pages", "draft_pages"))
self.save_cache()
self.readers.save_cache()
signals.page_generator_finalized.send(self)
def generate_output(self, writer):
- for page in chain(self.translations, self.pages,
- self.hidden_translations, self.hidden_pages,
- self.draft_translations, self.draft_pages):
+ for page in chain(
+ self.translations,
+ self.pages,
+ self.hidden_translations,
+ self.hidden_pages,
+ self.draft_translations,
+ self.draft_pages,
+ ):
signals.page_generator_write_page.send(self, content=page)
writer.write_file(
- page.save_as, self.get_template(page.template),
- self.context, page=page,
- relative_urls=self.settings['RELATIVE_URLS'],
- override_output=hasattr(page, 'override_save_as'),
- url=page.url)
+ page.save_as,
+ self.get_template(page.template),
+ self.context,
+ page=page,
+ relative_urls=self.settings["RELATIVE_URLS"],
+ override_output=hasattr(page, "override_save_as"),
+ url=page.url,
+ )
signals.page_writer_finalized.send(self, writer=writer)
def refresh_metadata_intersite_links(self):
- for e in chain(self.pages,
- self.hidden_pages,
- self.hidden_translations,
- self.draft_pages,
- self.draft_translations):
- if hasattr(e, 'refresh_metadata_intersite_links'):
+ for e in chain(
+ self.pages,
+ self.hidden_pages,
+ self.hidden_translations,
+ self.draft_pages,
+ self.draft_translations,
+ ):
+ if hasattr(e, "refresh_metadata_intersite_links"):
e.refresh_metadata_intersite_links()
@@ -853,71 +951,82 @@ class StaticGenerator(Generator):
def generate_context(self):
self.staticfiles = []
- linked_files = set(self.context['static_links'])
- found_files = self.get_files(self.settings['STATIC_PATHS'],
- exclude=self.settings['STATIC_EXCLUDES'],
- extensions=False)
+ linked_files = set(self.context["static_links"])
+ found_files = self.get_files(
+ self.settings["STATIC_PATHS"],
+ exclude=self.settings["STATIC_EXCLUDES"],
+ extensions=False,
+ )
for f in linked_files | found_files:
-
# skip content source files unless the user explicitly wants them
- if self.settings['STATIC_EXCLUDE_SOURCES']:
+ if self.settings["STATIC_EXCLUDE_SOURCES"]:
if self._is_potential_source_path(f):
continue
static = self.readers.read_file(
- base_path=self.path, path=f, content_class=Static,
- fmt='static', context=self.context,
+ base_path=self.path,
+ path=f,
+ content_class=Static,
+ fmt="static",
+ context=self.context,
preread_signal=signals.static_generator_preread,
preread_sender=self,
context_signal=signals.static_generator_context,
- context_sender=self)
+ context_sender=self,
+ )
self.staticfiles.append(static)
self.add_source_path(static, static=True)
- self._update_context(('staticfiles',))
+ self._update_context(("staticfiles",))
signals.static_generator_finalized.send(self)
def generate_output(self, writer):
- self._copy_paths(self.settings['THEME_STATIC_PATHS'], self.theme,
- self.settings['THEME_STATIC_DIR'], self.output_path,
- os.curdir)
- for sc in self.context['staticfiles']:
+ self._copy_paths(
+ self.settings["THEME_STATIC_PATHS"],
+ self.theme,
+ self.settings["THEME_STATIC_DIR"],
+ self.output_path,
+ os.curdir,
+ )
+ for sc in self.context["staticfiles"]:
if self._file_update_required(sc):
self._link_or_copy_staticfile(sc)
else:
- logger.debug('%s is up to date, not copying', sc.source_path)
+ logger.debug("%s is up to date, not copying", sc.source_path)
- def _copy_paths(self, paths, source, destination, output_path,
- final_path=None):
+ def _copy_paths(self, paths, source, destination, output_path, final_path=None):
"""Copy all the paths from source to destination"""
for path in paths:
source_path = os.path.join(source, path)
if final_path:
if os.path.isfile(source_path):
- destination_path = os.path.join(output_path, destination,
- final_path,
- os.path.basename(path))
+ destination_path = os.path.join(
+ output_path, destination, final_path, os.path.basename(path)
+ )
else:
- destination_path = os.path.join(output_path, destination,
- final_path)
+ destination_path = os.path.join(
+ output_path, destination, final_path
+ )
else:
destination_path = os.path.join(output_path, destination, path)
- copy(source_path, destination_path,
- self.settings['IGNORE_FILES'])
+ copy(source_path, destination_path, self.settings["IGNORE_FILES"])
def _file_update_required(self, staticfile):
source_path = os.path.join(self.path, staticfile.source_path)
save_as = os.path.join(self.output_path, staticfile.save_as)
if not os.path.exists(save_as):
return True
- elif (self.settings['STATIC_CREATE_LINKS'] and
- os.path.samefile(source_path, save_as)):
+ elif self.settings["STATIC_CREATE_LINKS"] and os.path.samefile(
+ source_path, save_as
+ ):
return False
- elif (self.settings['STATIC_CREATE_LINKS'] and
- os.path.realpath(save_as) == source_path):
+ elif (
+ self.settings["STATIC_CREATE_LINKS"]
+ and os.path.realpath(save_as) == source_path
+ ):
return False
- elif not self.settings['STATIC_CHECK_IF_MODIFIED']:
+ elif not self.settings["STATIC_CHECK_IF_MODIFIED"]:
return True
else:
return self._source_is_newer(staticfile)
@@ -930,7 +1039,7 @@ class StaticGenerator(Generator):
return s_mtime - d_mtime > 0.000001
def _link_or_copy_staticfile(self, sc):
- if self.settings['STATIC_CREATE_LINKS']:
+ if self.settings["STATIC_CREATE_LINKS"]:
self._link_staticfile(sc)
else:
self._copy_staticfile(sc)
@@ -940,7 +1049,7 @@ class StaticGenerator(Generator):
save_as = os.path.join(self.output_path, sc.save_as)
self._mkdir(os.path.dirname(save_as))
copy(source_path, save_as)
- logger.info('Copying %s to %s', sc.source_path, sc.save_as)
+ logger.info("Copying %s to %s", sc.source_path, sc.save_as)
def _link_staticfile(self, sc):
source_path = os.path.join(self.path, sc.source_path)
@@ -949,7 +1058,7 @@ class StaticGenerator(Generator):
try:
if os.path.lexists(save_as):
os.unlink(save_as)
- logger.info('Linking %s and %s', sc.source_path, sc.save_as)
+ logger.info("Linking %s and %s", sc.source_path, sc.save_as)
if self.fallback_to_symlinks:
os.symlink(source_path, save_as)
else:
@@ -957,9 +1066,8 @@ class StaticGenerator(Generator):
except OSError as err:
if err.errno == errno.EXDEV: # 18: Invalid cross-device link
logger.debug(
- "Cross-device links not valid. "
- "Creating symbolic links instead."
- )
+ "Cross-device links not valid. " "Creating symbolic links instead."
+ )
self.fallback_to_symlinks = True
self._link_staticfile(sc)
else:
@@ -972,19 +1080,17 @@ class StaticGenerator(Generator):
class SourceFileGenerator(Generator):
-
def generate_context(self):
- self.output_extension = self.settings['OUTPUT_SOURCES_EXTENSION']
+ self.output_extension = self.settings["OUTPUT_SOURCES_EXTENSION"]
def _create_source(self, obj):
output_path, _ = os.path.splitext(obj.save_as)
- dest = os.path.join(self.output_path,
- output_path + self.output_extension)
+ dest = os.path.join(self.output_path, output_path + self.output_extension)
copy(obj.source_path, dest)
def generate_output(self, writer=None):
- logger.info('Generating source files...')
- for obj in chain(self.context['articles'], self.context['pages']):
+ logger.info("Generating source files...")
+ for obj in chain(self.context["articles"], self.context["pages"]):
self._create_source(obj)
for obj_trans in obj.translations:
self._create_source(obj_trans)
diff --git a/pelican/log.py b/pelican/log.py
index be176ea8..0d2b6a3f 100644
--- a/pelican/log.py
+++ b/pelican/log.py
@@ -4,9 +4,7 @@ from collections import defaultdict
from rich.console import Console
from rich.logging import RichHandler
-__all__ = [
- 'init'
-]
+__all__ = ["init"]
console = Console()
@@ -34,8 +32,8 @@ class LimitFilter(logging.Filter):
return True
# extract group
- group = record.__dict__.get('limit_msg', None)
- group_args = record.__dict__.get('limit_args', ())
+ group = record.__dict__.get("limit_msg", None)
+ group_args = record.__dict__.get("limit_args", ())
# ignore record if it was already raised
message_key = (record.levelno, record.getMessage())
@@ -50,7 +48,7 @@ class LimitFilter(logging.Filter):
if logger_level > logging.DEBUG:
template_key = (record.levelno, record.msg)
message_key = (record.levelno, record.getMessage())
- if (template_key in self._ignore or message_key in self._ignore):
+ if template_key in self._ignore or message_key in self._ignore:
return False
# check if we went over threshold
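
The `limit_msg` and `limit_args` keys read from `record.__dict__` above are supplied by callers through the stdlib `extra=` mechanism, which copies the dict onto the log record as attributes. A minimal demonstration (filter and message invented for the example):

    import logging

    class ShowExtra(logging.Filter):
        def filter(self, record):
            # anything passed via extra= becomes a record attribute
            print("group:", record.__dict__.get("limit_msg"))
            return True

    logging.basicConfig(level=logging.WARNING)
    log = logging.getLogger("demo")
    log.addFilter(ShowExtra())
    log.warning("file %s is bad", "a.txt",
                extra={"limit_msg": "Other files are bad too"})
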
@@ -90,12 +88,12 @@ class FatalLogger(LimitLogger):
def warning(self, *args, **kwargs):
super().warning(*args, **kwargs)
if FatalLogger.warnings_fatal:
- raise RuntimeError('Warning encountered')
+ raise RuntimeError("Warning encountered")
def error(self, *args, **kwargs):
super().error(*args, **kwargs)
if FatalLogger.errors_fatal:
- raise RuntimeError('Error encountered')
+ raise RuntimeError("Error encountered")
logging.setLoggerClass(FatalLogger)
@@ -103,17 +101,19 @@ logging.setLoggerClass(FatalLogger)
logging.getLogger().__class__ = FatalLogger
-def init(level=None, fatal='', handler=RichHandler(console=console), name=None,
- logs_dedup_min_level=None):
- FatalLogger.warnings_fatal = fatal.startswith('warning')
+def init(
+ level=None,
+ fatal="",
+ handler=RichHandler(console=console),
+ name=None,
+ logs_dedup_min_level=None,
+):
+ FatalLogger.warnings_fatal = fatal.startswith("warning")
FatalLogger.errors_fatal = bool(fatal)
LOG_FORMAT = "%(message)s"
logging.basicConfig(
- level=level,
- format=LOG_FORMAT,
- datefmt="[%H:%M:%S]",
- handlers=[handler]
+ level=level, format=LOG_FORMAT, datefmt="[%H:%M:%S]", handlers=[handler]
)
logger = logging.getLogger(name)
@@ -126,17 +126,18 @@ def init(level=None, fatal='', handler=RichHandler(console=console), name=None,
def log_warnings():
import warnings
+
logging.captureWarnings(True)
warnings.simplefilter("default", DeprecationWarning)
- init(logging.DEBUG, name='py.warnings')
+ init(logging.DEBUG, name="py.warnings")
-if __name__ == '__main__':
+if __name__ == "__main__":
init(level=logging.DEBUG, name=__name__)
root_logger = logging.getLogger(__name__)
- root_logger.debug('debug')
- root_logger.info('info')
- root_logger.warning('warning')
- root_logger.error('error')
- root_logger.critical('critical')
+ root_logger.debug("debug")
+ root_logger.info("info")
+ root_logger.warning("warning")
+ root_logger.error("error")
+ root_logger.critical("critical")
diff --git a/pelican/paginator.py b/pelican/paginator.py
index 4231e67b..930c915b 100644
--- a/pelican/paginator.py
+++ b/pelican/paginator.py
@@ -6,8 +6,8 @@ from math import ceil
logger = logging.getLogger(__name__)
PaginationRule = namedtuple(
- 'PaginationRule',
- 'min_page URL SAVE_AS',
+ "PaginationRule",
+ "min_page URL SAVE_AS",
)
@@ -19,7 +19,7 @@ class Paginator:
self.settings = settings
if per_page:
self.per_page = per_page
- self.orphans = settings['DEFAULT_ORPHANS']
+ self.orphans = settings["DEFAULT_ORPHANS"]
else:
self.per_page = len(object_list)
self.orphans = 0
@@ -32,14 +32,21 @@ class Paginator:
top = bottom + self.per_page
if top + self.orphans >= self.count:
top = self.count
- return Page(self.name, self.url, self.object_list[bottom:top], number,
- self, self.settings)
+ return Page(
+ self.name,
+ self.url,
+ self.object_list[bottom:top],
+ number,
+ self,
+ self.settings,
+ )
def _get_count(self):
"Returns the total number of objects, across all pages."
if self._count is None:
self._count = len(self.object_list)
return self._count
+
count = property(_get_count)
def _get_num_pages(self):
@@ -48,6 +55,7 @@ class Paginator:
hits = max(1, self.count - self.orphans)
self._num_pages = int(ceil(hits / (float(self.per_page) or 1)))
return self._num_pages
+
num_pages = property(_get_num_pages)
def _get_page_range(self):
@@ -56,6 +64,7 @@ class Paginator:
a template for loop.
"""
return list(range(1, self.num_pages + 1))
+
page_range = property(_get_page_range)
@@ -64,7 +73,7 @@ class Page:
self.full_name = name
self.name, self.extension = os.path.splitext(name)
dn, fn = os.path.split(name)
- self.base_name = dn if fn in ('index.htm', 'index.html') else self.name
+ self.base_name = dn if fn in ("index.htm", "index.html") else self.name
self.base_url = url
self.object_list = object_list
self.number = number
@@ -72,7 +81,7 @@ class Page:
self.settings = settings
def __repr__(self):
- return '<Page {} of {}>'.format(self.number, self.paginator.num_pages)
+ return "<Page {} of {}>".format(self.number, self.paginator.num_pages)
def has_next(self):
return self.number < self.paginator.num_pages
@@ -117,7 +126,7 @@ class Page:
rule = None
# find the last matching pagination rule
- for p in self.settings['PAGINATION_PATTERNS']:
+ for p in self.settings["PAGINATION_PATTERNS"]:
if p.min_page == -1:
if not self.has_next():
rule = p
@@ -127,22 +136,22 @@ class Page:
rule = p
if not rule:
- return ''
+ return ""
prop_value = getattr(rule, key)
if not isinstance(prop_value, str):
- logger.warning('%s is set to %s', key, prop_value)
+ logger.warning("%s is set to %s", key, prop_value)
return prop_value
# URL or SAVE_AS is a string, format it with a controlled context
context = {
- 'save_as': self.full_name,
- 'url': self.base_url,
- 'name': self.name,
- 'base_name': self.base_name,
- 'extension': self.extension,
- 'number': self.number,
+ "save_as": self.full_name,
+ "url": self.base_url,
+ "name": self.name,
+ "base_name": self.base_name,
+ "extension": self.extension,
+ "number": self.number,
}
ret = prop_value.format(**context)
@@ -155,9 +164,9 @@ class Page:
# changed to lstrip() because that would remove all leading slashes and
# thus make the workaround impossible. See
# test_custom_pagination_pattern() for a verification of this.
- if ret.startswith('/'):
+ if ret.startswith("/"):
ret = ret[1:]
return ret
- url = property(functools.partial(_from_settings, key='URL'))
- save_as = property(functools.partial(_from_settings, key='SAVE_AS'))
+ url = property(functools.partial(_from_settings, key="URL"))
+ save_as = property(functools.partial(_from_settings, key="SAVE_AS"))
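
For orientation: `PAGINATION_PATTERNS` entries become `PaginationRule` tuples, and `_from_settings` above keeps the last rule whose `min_page` threshold is met, with `min_page == -1` reserved for the final page. A rough standalone sketch of that selection, using the pattern shape from Pelican's documentation (values are examples):

    from collections import namedtuple

    PaginationRule = namedtuple("PaginationRule", "min_page URL SAVE_AS")

    rules = [
        PaginationRule(1, "{url}", "{save_as}"),
        PaginationRule(2, "{base_name}/page/{number}/",
                       "{base_name}/page/{number}/index.html"),
    ]

    def url_for(number, base_name="category/misc", url="category/misc.html"):
        rule = None
        for p in rules:  # last matching rule wins
            if 0 < p.min_page <= number:
                rule = p
        return rule.URL.format(number=number, base_name=base_name, url=url)

    print(url_for(1))  # category/misc.html
    print(url_for(3))  # category/misc/page/3/
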
diff --git a/pelican/plugins/_utils.py b/pelican/plugins/_utils.py
index 87877b08..f0c18f5c 100644
--- a/pelican/plugins/_utils.py
+++ b/pelican/plugins/_utils.py
@@ -24,26 +24,26 @@ def get_namespace_plugins(ns_pkg=None):
return {
name: importlib.import_module(name)
- for finder, name, ispkg
- in iter_namespace(ns_pkg)
+ for finder, name, ispkg in iter_namespace(ns_pkg)
if ispkg
}
def list_plugins(ns_pkg=None):
from pelican.log import init as init_logging
+
init_logging(logging.INFO)
ns_plugins = get_namespace_plugins(ns_pkg)
if ns_plugins:
- logger.info('Plugins found:\n' + '\n'.join(ns_plugins))
+ logger.info("Plugins found:\n" + "\n".join(ns_plugins))
else:
- logger.info('No plugins are installed')
+ logger.info("No plugins are installed")
def load_legacy_plugin(plugin, plugin_paths):
- if '.' in plugin:
+ if "." in plugin:
# it is in a package, try to resolve package first
- package, _, _ = plugin.rpartition('.')
+ package, _, _ = plugin.rpartition(".")
load_legacy_plugin(package, plugin_paths)
# Try to find plugin in PLUGIN_PATHS
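
The recursive call above exists because a dotted submodule can only be located once its parent package is importable; the stdlib behaves the same way, with `importlib.util.find_spec` importing the parent for dotted names:

    import importlib.util

    spec = importlib.util.find_spec("json.decoder")  # imports `json` first
    print(spec.name)    # json.decoder
    print(spec.origin)  # filesystem path of decoder.py
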
@@ -52,7 +52,7 @@ def load_legacy_plugin(plugin, plugin_paths):
# If failed, try to find it in normal importable locations
spec = importlib.util.find_spec(plugin)
if spec is None:
- raise ImportError('Cannot import plugin `{}`'.format(plugin))
+ raise ImportError("Cannot import plugin `{}`".format(plugin))
else:
# Avoid loading the same plugin twice
if spec.name in sys.modules:
@@ -78,30 +78,28 @@ def load_legacy_plugin(plugin, plugin_paths):
def load_plugins(settings):
- logger.debug('Finding namespace plugins')
+ logger.debug("Finding namespace plugins")
namespace_plugins = get_namespace_plugins()
if namespace_plugins:
- logger.debug('Namespace plugins found:\n' +
- '\n'.join(namespace_plugins))
+ logger.debug("Namespace plugins found:\n" + "\n".join(namespace_plugins))
plugins = []
- if settings.get('PLUGINS') is not None:
- for plugin in settings['PLUGINS']:
+ if settings.get("PLUGINS") is not None:
+ for plugin in settings["PLUGINS"]:
if isinstance(plugin, str):
- logger.debug('Loading plugin `%s`', plugin)
+ logger.debug("Loading plugin `%s`", plugin)
# try to find in namespace plugins
if plugin in namespace_plugins:
plugin = namespace_plugins[plugin]
- elif 'pelican.plugins.{}'.format(plugin) in namespace_plugins:
- plugin = namespace_plugins['pelican.plugins.{}'.format(
- plugin)]
+ elif "pelican.plugins.{}".format(plugin) in namespace_plugins:
+ plugin = namespace_plugins["pelican.plugins.{}".format(plugin)]
# try to import it
else:
try:
plugin = load_legacy_plugin(
- plugin,
- settings.get('PLUGIN_PATHS', []))
+ plugin, settings.get("PLUGIN_PATHS", [])
+ )
except ImportError as e:
- logger.error('Cannot load plugin `%s`\n%s', plugin, e)
+ logger.error("Cannot load plugin `%s`\n%s", plugin, e)
continue
plugins.append(plugin)
else:
diff --git a/pelican/plugins/signals.py b/pelican/plugins/signals.py
index 4013360f..ff129cb4 100644
--- a/pelican/plugins/signals.py
+++ b/pelican/plugins/signals.py
@@ -2,48 +2,48 @@ from blinker import signal
# Run-level signals:
-initialized = signal('pelican_initialized')
-get_generators = signal('get_generators')
-all_generators_finalized = signal('all_generators_finalized')
-get_writer = signal('get_writer')
-finalized = signal('pelican_finalized')
+initialized = signal("pelican_initialized")
+get_generators = signal("get_generators")
+all_generators_finalized = signal("all_generators_finalized")
+get_writer = signal("get_writer")
+finalized = signal("pelican_finalized")
# Reader-level signals
-readers_init = signal('readers_init')
+readers_init = signal("readers_init")
# Generator-level signals
-generator_init = signal('generator_init')
+generator_init = signal("generator_init")
-article_generator_init = signal('article_generator_init')
-article_generator_pretaxonomy = signal('article_generator_pretaxonomy')
-article_generator_finalized = signal('article_generator_finalized')
-article_generator_write_article = signal('article_generator_write_article')
-article_writer_finalized = signal('article_writer_finalized')
+article_generator_init = signal("article_generator_init")
+article_generator_pretaxonomy = signal("article_generator_pretaxonomy")
+article_generator_finalized = signal("article_generator_finalized")
+article_generator_write_article = signal("article_generator_write_article")
+article_writer_finalized = signal("article_writer_finalized")
-page_generator_init = signal('page_generator_init')
-page_generator_finalized = signal('page_generator_finalized')
-page_generator_write_page = signal('page_generator_write_page')
-page_writer_finalized = signal('page_writer_finalized')
+page_generator_init = signal("page_generator_init")
+page_generator_finalized = signal("page_generator_finalized")
+page_generator_write_page = signal("page_generator_write_page")
+page_writer_finalized = signal("page_writer_finalized")
-static_generator_init = signal('static_generator_init')
-static_generator_finalized = signal('static_generator_finalized')
+static_generator_init = signal("static_generator_init")
+static_generator_finalized = signal("static_generator_finalized")
# Page-level signals
-article_generator_preread = signal('article_generator_preread')
-article_generator_context = signal('article_generator_context')
+article_generator_preread = signal("article_generator_preread")
+article_generator_context = signal("article_generator_context")
-page_generator_preread = signal('page_generator_preread')
-page_generator_context = signal('page_generator_context')
+page_generator_preread = signal("page_generator_preread")
+page_generator_context = signal("page_generator_context")
-static_generator_preread = signal('static_generator_preread')
-static_generator_context = signal('static_generator_context')
+static_generator_preread = signal("static_generator_preread")
+static_generator_context = signal("static_generator_context")
-content_object_init = signal('content_object_init')
+content_object_init = signal("content_object_init")
# Writers signals
-content_written = signal('content_written')
-feed_generated = signal('feed_generated')
-feed_written = signal('feed_written')
+content_written = signal("content_written")
+feed_generated = signal("feed_generated")
+feed_written = signal("feed_written")
diff --git a/pelican/readers.py b/pelican/readers.py
index 03c3cc20..5033c0bd 100644
--- a/pelican/readers.py
+++ b/pelican/readers.py
@@ -31,33 +31,29 @@ except ImportError:
_DISCARD = object()
DUPLICATES_DEFINITIONS_ALLOWED = {
- 'tags': False,
- 'date': False,
- 'modified': False,
- 'status': False,
- 'category': False,
- 'author': False,
- 'save_as': False,
- 'url': False,
- 'authors': False,
- 'slug': False
+ "tags": False,
+ "date": False,
+ "modified": False,
+ "status": False,
+ "category": False,
+ "author": False,
+ "save_as": False,
+ "url": False,
+ "authors": False,
+ "slug": False,
}
METADATA_PROCESSORS = {
- 'tags': lambda x, y: ([
- Tag(tag, y)
- for tag in ensure_metadata_list(x)
- ] or _DISCARD),
- 'date': lambda x, y: get_date(x.replace('_', ' ')),
- 'modified': lambda x, y: get_date(x),
- 'status': lambda x, y: x.strip() or _DISCARD,
- 'category': lambda x, y: _process_if_nonempty(Category, x, y),
- 'author': lambda x, y: _process_if_nonempty(Author, x, y),
- 'authors': lambda x, y: ([
- Author(author, y)
- for author in ensure_metadata_list(x)
- ] or _DISCARD),
- 'slug': lambda x, y: x.strip() or _DISCARD,
+ "tags": lambda x, y: ([Tag(tag, y) for tag in ensure_metadata_list(x)] or _DISCARD),
+ "date": lambda x, y: get_date(x.replace("_", " ")),
+ "modified": lambda x, y: get_date(x),
+ "status": lambda x, y: x.strip() or _DISCARD,
+ "category": lambda x, y: _process_if_nonempty(Category, x, y),
+ "author": lambda x, y: _process_if_nonempty(Author, x, y),
+ "authors": lambda x, y: (
+ [Author(author, y) for author in ensure_metadata_list(x)] or _DISCARD
+ ),
+ "slug": lambda x, y: x.strip() or _DISCARD,
}
logger = logging.getLogger(__name__)
@@ -65,25 +61,23 @@ logger = logging.getLogger(__name__)
def ensure_metadata_list(text):
"""Canonicalize the format of a list of authors or tags. This works
- the same way as Docutils' "authors" field: if it's already a list,
- those boundaries are preserved; otherwise, it must be a string;
- if the string contains semicolons, it is split on semicolons;
- otherwise, it is split on commas. This allows you to write
- author lists in either "Jane Doe, John Doe" or "Doe, Jane; Doe, John"
- format.
+ the same way as Docutils' "authors" field: if it's already a list,
+ those boundaries are preserved; otherwise, it must be a string;
+ if the string contains semicolons, it is split on semicolons;
+ otherwise, it is split on commas. This allows you to write
+ author lists in either "Jane Doe, John Doe" or "Doe, Jane; Doe, John"
+ format.
- Regardless, all list items undergo .strip() before returning, and
- empty items are discarded.
+ Regardless, all list items undergo .strip() before returning, and
+ empty items are discarded.
"""
if isinstance(text, str):
- if ';' in text:
- text = text.split(';')
+ if ";" in text:
+ text = text.split(";")
else:
- text = text.split(',')
+ text = text.split(",")
- return list(OrderedDict.fromkeys(
- [v for v in (w.strip() for w in text) if v]
- ))
+ return list(OrderedDict.fromkeys([v for v in (w.strip() for w in text) if v]))
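
A direct usage illustration of the splitting, stripping, and order-preserving dedup just described (assumes Pelican is importable):

    from pelican.readers import ensure_metadata_list

    print(ensure_metadata_list("Jane Doe, John Doe"))    # ['Jane Doe', 'John Doe']
    print(ensure_metadata_list("Doe, Jane; Doe, John"))  # ['Doe, Jane', 'Doe, John']
    print(ensure_metadata_list(" a ,, b , a "))          # ['a', 'b']
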
def _process_if_nonempty(processor, name, settings):
@@ -112,8 +106,9 @@ class BaseReader:
Markdown).
"""
+
enabled = True
- file_extensions = ['static']
+ file_extensions = ["static"]
extensions = None
def __init__(self, settings):
@@ -132,13 +127,12 @@ class BaseReader:
class _FieldBodyTranslator(HTMLTranslator):
-
def __init__(self, document):
super().__init__(document)
self.compact_p = None
def astext(self):
- return ''.join(self.body)
+ return "".join(self.body)
def visit_field_body(self, node):
pass
@@ -154,27 +148,25 @@ def render_node_to_html(document, node, field_body_translator_class):
class PelicanHTMLWriter(Writer):
-
def __init__(self):
super().__init__()
self.translator_class = PelicanHTMLTranslator
class PelicanHTMLTranslator(HTMLTranslator):
-
def visit_abbreviation(self, node):
attrs = {}
- if node.hasattr('explanation'):
- attrs['title'] = node['explanation']
- self.body.append(self.starttag(node, 'abbr', '', **attrs))
+ if node.hasattr("explanation"):
+ attrs["title"] = node["explanation"]
+ self.body.append(self.starttag(node, "abbr", "", **attrs))
def depart_abbreviation(self, node):
- self.body.append('</abbr>')
+ self.body.append("</abbr>")
def visit_image(self, node):
# set an empty alt if alt is not specified
# avoids that alt is taken from src
- node['alt'] = node.get('alt', '')
+ node["alt"] = node.get("alt", "")
return HTMLTranslator.visit_image(self, node)
@@ -194,7 +186,7 @@ class RstReader(BaseReader):
"""
enabled = bool(docutils)
- file_extensions = ['rst']
+ file_extensions = ["rst"]
writer_class = PelicanHTMLWriter
field_body_translator_class = _FieldBodyTranslator
@@ -202,25 +194,28 @@ class RstReader(BaseReader):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
- lang_code = self.settings.get('DEFAULT_LANG', 'en')
+ lang_code = self.settings.get("DEFAULT_LANG", "en")
if get_docutils_lang(lang_code):
self._language_code = lang_code
else:
- logger.warning("Docutils has no localization for '%s'."
- " Using 'en' instead.", lang_code)
- self._language_code = 'en'
+ logger.warning(
+ "Docutils has no localization for '%s'." " Using 'en' instead.",
+ lang_code,
+ )
+ self._language_code = "en"
def _parse_metadata(self, document, source_path):
"""Return the dict containing document metadata"""
- formatted_fields = self.settings['FORMATTED_FIELDS']
+ formatted_fields = self.settings["FORMATTED_FIELDS"]
output = {}
if document.first_child_matching_class(docutils.nodes.title) is None:
logger.warning(
- 'Document title missing in file %s: '
- 'Ensure exactly one top level section',
- source_path)
+ "Document title missing in file %s: "
+ "Ensure exactly one top level section",
+ source_path,
+ )
try:
# docutils 0.18.1+
@@ -231,16 +226,16 @@ class RstReader(BaseReader):
for docinfo in nodes:
for element in docinfo.children:
- if element.tagname == 'field': # custom fields (e.g. summary)
+ if element.tagname == "field": # custom fields (e.g. summary)
name_elem, body_elem = element.children
name = name_elem.astext()
if name.lower() in formatted_fields:
value = render_node_to_html(
- document, body_elem,
- self.field_body_translator_class)
+ document, body_elem, self.field_body_translator_class
+ )
else:
value = body_elem.astext()
- elif element.tagname == 'authors': # author list
+ elif element.tagname == "authors": # author list
name = element.tagname
value = [element.astext() for element in element.children]
else: # standard fields (e.g. address)
@@ -252,22 +247,24 @@ class RstReader(BaseReader):
return output
def _get_publisher(self, source_path):
- extra_params = {'initial_header_level': '2',
- 'syntax_highlight': 'short',
- 'input_encoding': 'utf-8',
- 'language_code': self._language_code,
- 'halt_level': 2,
- 'traceback': True,
- 'warning_stream': StringIO(),
- 'embed_stylesheet': False}
- user_params = self.settings.get('DOCUTILS_SETTINGS')
+ extra_params = {
+ "initial_header_level": "2",
+ "syntax_highlight": "short",
+ "input_encoding": "utf-8",
+ "language_code": self._language_code,
+ "halt_level": 2,
+ "traceback": True,
+ "warning_stream": StringIO(),
+ "embed_stylesheet": False,
+ }
+ user_params = self.settings.get("DOCUTILS_SETTINGS")
if user_params:
extra_params.update(user_params)
pub = docutils.core.Publisher(
- writer=self.writer_class(),
- destination_class=docutils.io.StringOutput)
- pub.set_components('standalone', 'restructuredtext', 'html')
+ writer=self.writer_class(), destination_class=docutils.io.StringOutput
+ )
+ pub.set_components("standalone", "restructuredtext", "html")
pub.process_programmatic_settings(None, extra_params, None)
pub.set_source(source_path=source_path)
pub.publish()
@@ -277,10 +274,10 @@ class RstReader(BaseReader):
"""Parses restructured text"""
pub = self._get_publisher(source_path)
parts = pub.writer.parts
- content = parts.get('body')
+ content = parts.get("body")
metadata = self._parse_metadata(pub.document, source_path)
- metadata.setdefault('title', parts.get('title'))
+ metadata.setdefault("title", parts.get("title"))
return content, metadata
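
The `parts` mapping consumed here (`body`, `title`, and friends) is standard docutils writer output; for readers unfamiliar with the `Publisher` API, `publish_parts` exposes the same dict with less ceremony (illustrative, not what Pelican calls):

    from docutils.core import publish_parts

    parts = publish_parts("My Title\n========\n\nSome body text.",
                          writer_name="html")
    print(parts["title"])         # My Title
    print(parts["body"].strip())  # <p>Some body text.</p>
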
@@ -289,26 +286,26 @@ class MarkdownReader(BaseReader):
"""Reader for Markdown files"""
enabled = bool(Markdown)
- file_extensions = ['md', 'markdown', 'mkd', 'mdown']
+ file_extensions = ["md", "markdown", "mkd", "mdown"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
- settings = self.settings['MARKDOWN']
- settings.setdefault('extension_configs', {})
- settings.setdefault('extensions', [])
- for extension in settings['extension_configs'].keys():
- if extension not in settings['extensions']:
- settings['extensions'].append(extension)
- if 'markdown.extensions.meta' not in settings['extensions']:
- settings['extensions'].append('markdown.extensions.meta')
+ settings = self.settings["MARKDOWN"]
+ settings.setdefault("extension_configs", {})
+ settings.setdefault("extensions", [])
+ for extension in settings["extension_configs"].keys():
+ if extension not in settings["extensions"]:
+ settings["extensions"].append(extension)
+ if "markdown.extensions.meta" not in settings["extensions"]:
+ settings["extensions"].append("markdown.extensions.meta")
self._source_path = None
def _parse_metadata(self, meta):
"""Return the dict containing document metadata"""
- formatted_fields = self.settings['FORMATTED_FIELDS']
+ formatted_fields = self.settings["FORMATTED_FIELDS"]
# prevent metadata extraction in fields
- self._md.preprocessors.deregister('meta')
+ self._md.preprocessors.deregister("meta")
output = {}
for name, value in meta.items():
@@ -323,9 +320,10 @@ class MarkdownReader(BaseReader):
elif not DUPLICATES_DEFINITIONS_ALLOWED.get(name, True):
if len(value) > 1:
logger.warning(
- 'Duplicate definition of `%s` '
- 'for %s. Using first one.',
- name, self._source_path)
+ "Duplicate definition of `%s` " "for %s. Using first one.",
+ name,
+ self._source_path,
+ )
output[name] = self.process_metadata(name, value[0])
elif len(value) > 1:
# handle list metadata as list of string
@@ -339,11 +337,11 @@ class MarkdownReader(BaseReader):
"""Parse content and metadata of markdown files"""
self._source_path = source_path
- self._md = Markdown(**self.settings['MARKDOWN'])
+ self._md = Markdown(**self.settings["MARKDOWN"])
with pelican_open(source_path) as text:
content = self._md.convert(text)
- if hasattr(self._md, 'Meta'):
+ if hasattr(self._md, "Meta"):
metadata = self._parse_metadata(self._md.Meta)
else:
metadata = {}
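
The `Meta` attribute checked here comes from python-markdown's `meta` extension (force-enabled a few hunks up): keys are lowercased and every value is a list of lines. A compact demonstration under that extension:

    import markdown

    md = markdown.Markdown(extensions=["meta"])
    html = md.convert("Title: Hi\nTags: foo, bar\n\nBody **text**.")
    print(md.Meta)  # {'title': ['Hi'], 'tags': ['foo, bar']}
    print(html)     # <p>Body <strong>text</strong>.</p>
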
@@ -353,17 +351,17 @@ class MarkdownReader(BaseReader):
class HTMLReader(BaseReader):
"""Parses HTML files as input, looking for meta, title, and body tags"""
- file_extensions = ['htm', 'html']
+ file_extensions = ["htm", "html"]
enabled = True
class _HTMLParser(HTMLParser):
def __init__(self, settings, filename):
super().__init__(convert_charrefs=False)
- self.body = ''
+ self.body = ""
self.metadata = {}
self.settings = settings
- self._data_buffer = ''
+ self._data_buffer = ""
self._filename = filename
@@ -374,59 +372,59 @@ class HTMLReader(BaseReader):
self._in_tags = False
def handle_starttag(self, tag, attrs):
- if tag == 'head' and self._in_top_level:
+ if tag == "head" and self._in_top_level:
self._in_top_level = False
self._in_head = True
- elif tag == 'title' and self._in_head:
+ elif tag == "title" and self._in_head:
self._in_title = True
- self._data_buffer = ''
- elif tag == 'body' and self._in_top_level:
+ self._data_buffer = ""
+ elif tag == "body" and self._in_top_level:
self._in_top_level = False
self._in_body = True
- self._data_buffer = ''
- elif tag == 'meta' and self._in_head:
+ self._data_buffer = ""
+ elif tag == "meta" and self._in_head:
self._handle_meta_tag(attrs)
elif self._in_body:
self._data_buffer += self.build_tag(tag, attrs, False)
def handle_endtag(self, tag):
- if tag == 'head':
+ if tag == "head":
if self._in_head:
self._in_head = False
self._in_top_level = True
- elif self._in_head and tag == 'title':
+ elif self._in_head and tag == "title":
self._in_title = False
- self.metadata['title'] = self._data_buffer
- elif tag == 'body':
+ self.metadata["title"] = self._data_buffer
+ elif tag == "body":
self.body = self._data_buffer
self._in_body = False
self._in_top_level = True
elif self._in_body:
- self._data_buffer += '</{}>'.format(escape(tag))
+ self._data_buffer += "</{}>".format(escape(tag))
def handle_startendtag(self, tag, attrs):
- if tag == 'meta' and self._in_head:
+ if tag == "meta" and self._in_head:
self._handle_meta_tag(attrs)
if self._in_body:
self._data_buffer += self.build_tag(tag, attrs, True)
def handle_comment(self, data):
- self._data_buffer += '<!--{}-->'.format(data)
+ self._data_buffer += "<!--{}-->".format(data)
def handle_data(self, data):
self._data_buffer += data
def handle_entityref(self, data):
- self._data_buffer += '&{};'.format(data)
+ self._data_buffer += "&{};".format(data)
def handle_charref(self, data):
- self._data_buffer += '&#{};'.format(data)
+ self._data_buffer += "&#{};".format(data)
def build_tag(self, tag, attrs, close_tag):
- result = '<{}'.format(escape(tag))
+ result = "<{}".format(escape(tag))
for k, v in attrs:
- result += ' ' + escape(k)
+ result += " " + escape(k)
if v is not None:
# If the attribute value contains a double quote, surround
# with single quotes, otherwise use double quotes.
@@ -435,33 +433,39 @@ class HTMLReader(BaseReader):
else:
result += '="{}"'.format(escape(v, quote=False))
if close_tag:
- return result + ' />'
- return result + '>'
+ return result + " />"
+ return result + ">"
def _handle_meta_tag(self, attrs):
- name = self._attr_value(attrs, 'name')
+ name = self._attr_value(attrs, "name")
if name is None:
attr_list = ['{}="{}"'.format(k, v) for k, v in attrs]
- attr_serialized = ', '.join(attr_list)
- logger.warning("Meta tag in file %s does not have a 'name' "
- "attribute, skipping. Attributes: %s",
- self._filename, attr_serialized)
+ attr_serialized = ", ".join(attr_list)
+ logger.warning(
+ "Meta tag in file %s does not have a 'name' "
+ "attribute, skipping. Attributes: %s",
+ self._filename,
+ attr_serialized,
+ )
return
name = name.lower()
- contents = self._attr_value(attrs, 'content', '')
+ contents = self._attr_value(attrs, "content", "")
if not contents:
- contents = self._attr_value(attrs, 'contents', '')
+ contents = self._attr_value(attrs, "contents", "")
if contents:
logger.warning(
"Meta tag attribute 'contents' used in file %s, should"
" be changed to 'content'",
self._filename,
- extra={'limit_msg': "Other files have meta tag "
- "attribute 'contents' that should "
- "be changed to 'content'"})
+ extra={
+ "limit_msg": "Other files have meta tag "
+ "attribute 'contents' that should "
+ "be changed to 'content'"
+ },
+ )
- if name == 'keywords':
- name = 'tags'
+ if name == "keywords":
+ name = "tags"
if name in self.metadata:
# if this metadata already exists (i.e. a previous tag with the
@@ -501,22 +505,23 @@ class Readers(FileStampDataCacher):
"""
- def __init__(self, settings=None, cache_name=''):
+ def __init__(self, settings=None, cache_name=""):
self.settings = settings or {}
self.readers = {}
self.reader_classes = {}
for cls in [BaseReader] + BaseReader.__subclasses__():
if not cls.enabled:
- logger.debug('Missing dependencies for %s',
- ', '.join(cls.file_extensions))
+ logger.debug(
+ "Missing dependencies for %s", ", ".join(cls.file_extensions)
+ )
continue
for ext in cls.file_extensions:
self.reader_classes[ext] = cls
- if self.settings['READERS']:
- self.reader_classes.update(self.settings['READERS'])
+ if self.settings["READERS"]:
+ self.reader_classes.update(self.settings["READERS"])
signals.readers_init.send(self)
@@ -527,53 +532,67 @@ class Readers(FileStampDataCacher):
self.readers[fmt] = reader_class(self.settings)
# set up caching
- cache_this_level = (cache_name != '' and
- self.settings['CONTENT_CACHING_LAYER'] == 'reader')
- caching_policy = cache_this_level and self.settings['CACHE_CONTENT']
- load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE']
+ cache_this_level = (
+ cache_name != "" and self.settings["CONTENT_CACHING_LAYER"] == "reader"
+ )
+ caching_policy = cache_this_level and self.settings["CACHE_CONTENT"]
+ load_policy = cache_this_level and self.settings["LOAD_CONTENT_CACHE"]
super().__init__(settings, cache_name, caching_policy, load_policy)
@property
def extensions(self):
return self.readers.keys()
- def read_file(self, base_path, path, content_class=Page, fmt=None,
- context=None, preread_signal=None, preread_sender=None,
- context_signal=None, context_sender=None):
+ def read_file(
+ self,
+ base_path,
+ path,
+ content_class=Page,
+ fmt=None,
+ context=None,
+ preread_signal=None,
+ preread_sender=None,
+ context_signal=None,
+ context_sender=None,
+ ):
"""Return a content object parsed with the given format."""
path = os.path.abspath(os.path.join(base_path, path))
source_path = posixize_path(os.path.relpath(path, base_path))
- logger.debug(
- 'Read file %s -> %s',
- source_path, content_class.__name__)
+ logger.debug("Read file %s -> %s", source_path, content_class.__name__)
if not fmt:
_, ext = os.path.splitext(os.path.basename(path))
fmt = ext[1:]
if fmt not in self.readers:
- raise TypeError(
- 'Pelican does not know how to parse %s', path)
+ raise TypeError("Pelican does not know how to parse %s", path)
if preread_signal:
- logger.debug(
- 'Signal %s.send(%s)',
- preread_signal.name, preread_sender)
+ logger.debug("Signal %s.send(%s)", preread_signal.name, preread_sender)
preread_signal.send(preread_sender)
reader = self.readers[fmt]
- metadata = _filter_discardable_metadata(default_metadata(
- settings=self.settings, process=reader.process_metadata))
- metadata.update(path_metadata(
- full_path=path, source_path=source_path,
- settings=self.settings))
- metadata.update(_filter_discardable_metadata(parse_path_metadata(
- source_path=source_path, settings=self.settings,
- process=reader.process_metadata)))
+ metadata = _filter_discardable_metadata(
+ default_metadata(settings=self.settings, process=reader.process_metadata)
+ )
+ metadata.update(
+ path_metadata(
+ full_path=path, source_path=source_path, settings=self.settings
+ )
+ )
+ metadata.update(
+ _filter_discardable_metadata(
+ parse_path_metadata(
+ source_path=source_path,
+ settings=self.settings,
+ process=reader.process_metadata,
+ )
+ )
+ )
reader_name = reader.__class__.__name__
- metadata['reader'] = reader_name.replace('Reader', '').lower()
+ metadata["reader"] = reader_name.replace("Reader", "").lower()
content, reader_metadata = self.get_cached_data(path, (None, None))
if content is None:
@@ -587,14 +606,14 @@ class Readers(FileStampDataCacher):
find_empty_alt(content, path)
# eventually filter the content with typogrify if asked so
- if self.settings['TYPOGRIFY']:
+ if self.settings["TYPOGRIFY"]:
from typogrify.filters import typogrify
import smartypants
- typogrify_dashes = self.settings['TYPOGRIFY_DASHES']
- if typogrify_dashes == 'oldschool':
+ typogrify_dashes = self.settings["TYPOGRIFY_DASHES"]
+ if typogrify_dashes == "oldschool":
smartypants.Attr.default = smartypants.Attr.set2
- elif typogrify_dashes == 'oldschool_inverted':
+ elif typogrify_dashes == "oldschool_inverted":
smartypants.Attr.default = smartypants.Attr.set3
else:
smartypants.Attr.default = smartypants.Attr.set1
@@ -608,31 +627,32 @@ class Readers(FileStampDataCacher):
def typogrify_wrapper(text):
"""Ensures ignore_tags feature is backward compatible"""
try:
- return typogrify(
- text,
- self.settings['TYPOGRIFY_IGNORE_TAGS'])
+ return typogrify(text, self.settings["TYPOGRIFY_IGNORE_TAGS"])
except TypeError:
return typogrify(text)
if content:
content = typogrify_wrapper(content)
- if 'title' in metadata:
- metadata['title'] = typogrify_wrapper(metadata['title'])
+ if "title" in metadata:
+ metadata["title"] = typogrify_wrapper(metadata["title"])
- if 'summary' in metadata:
- metadata['summary'] = typogrify_wrapper(metadata['summary'])
+ if "summary" in metadata:
+ metadata["summary"] = typogrify_wrapper(metadata["summary"])
if context_signal:
logger.debug(
- 'Signal %s.send(%s, <metadata>)',
- context_signal.name,
- context_sender)
+ "Signal %s.send(%s, <metadata>)", context_signal.name, context_sender
+ )
context_signal.send(context_sender, metadata=metadata)
- return content_class(content=content, metadata=metadata,
- settings=self.settings, source_path=path,
- context=context)
+ return content_class(
+ content=content,
+ metadata=metadata,
+ settings=self.settings,
+ source_path=path,
+ context=context,
+ )
def find_empty_alt(content, path):
@@ -642,7 +662,8 @@ def find_empty_alt(content, path):
as they are really likely to be accessibility flaws.
"""
- imgs = re.compile(r"""
+ imgs = re.compile(
+ r"""
(?:
# src before alt
]*
src=(['"])(.*?)\5
)
- """, re.X)
+ """,
+ re.X,
+ )
for match in re.findall(imgs, content):
logger.warning(
- 'Empty alt attribute for image %s in %s',
- os.path.basename(match[1] + match[5]), path,
- extra={'limit_msg': 'Other images have empty alt attributes'})
+ "Empty alt attribute for image %s in %s",
+ os.path.basename(match[1] + match[5]),
+ path,
+ extra={"limit_msg": "Other images have empty alt attributes"},
+ )
def default_metadata(settings=None, process=None):
metadata = {}
if settings:
- for name, value in dict(settings.get('DEFAULT_METADATA', {})).items():
+ for name, value in dict(settings.get("DEFAULT_METADATA", {})).items():
if process:
value = process(name, value)
metadata[name] = value
- if 'DEFAULT_CATEGORY' in settings:
- value = settings['DEFAULT_CATEGORY']
+ if "DEFAULT_CATEGORY" in settings:
+ value = settings["DEFAULT_CATEGORY"]
if process:
- value = process('category', value)
- metadata['category'] = value
- if settings.get('DEFAULT_DATE', None) and \
- settings['DEFAULT_DATE'] != 'fs':
- if isinstance(settings['DEFAULT_DATE'], str):
- metadata['date'] = get_date(settings['DEFAULT_DATE'])
+ value = process("category", value)
+ metadata["category"] = value
+ if settings.get("DEFAULT_DATE", None) and settings["DEFAULT_DATE"] != "fs":
+ if isinstance(settings["DEFAULT_DATE"], str):
+ metadata["date"] = get_date(settings["DEFAULT_DATE"])
else:
- metadata['date'] = datetime.datetime(*settings['DEFAULT_DATE'])
+ metadata["date"] = datetime.datetime(*settings["DEFAULT_DATE"])
return metadata
def path_metadata(full_path, source_path, settings=None):
metadata = {}
if settings:
- if settings.get('DEFAULT_DATE', None) == 'fs':
- metadata['date'] = datetime.datetime.fromtimestamp(
- os.stat(full_path).st_mtime)
- metadata['modified'] = metadata['date']
+ if settings.get("DEFAULT_DATE", None) == "fs":
+ metadata["date"] = datetime.datetime.fromtimestamp(
+ os.stat(full_path).st_mtime
+ )
+ metadata["modified"] = metadata["date"]
# Apply EXTRA_PATH_METADATA for the source path and the paths of any
# parent directories. Sorting EPM first ensures that the most specific
# path wins conflicts.
- epm = settings.get('EXTRA_PATH_METADATA', {})
+ epm = settings.get("EXTRA_PATH_METADATA", {})
for path, meta in sorted(epm.items()):
# Enforce a trailing slash when checking for parent directories.
# This prevents false positives when one file or directory's name
# is a prefix of another's.
- dirpath = posixize_path(os.path.join(path, ''))
+ dirpath = posixize_path(os.path.join(path, ""))
if source_path == path or source_path.startswith(dirpath):
metadata.update(meta)
@@ -736,11 +761,10 @@ def parse_path_metadata(source_path, settings=None, process=None):
subdir = os.path.basename(dirname)
if settings:
checks = []
- for key, data in [('FILENAME_METADATA', base),
- ('PATH_METADATA', source_path)]:
+ for key, data in [("FILENAME_METADATA", base), ("PATH_METADATA", source_path)]:
checks.append((settings.get(key, None), data))
- if settings.get('USE_FOLDER_AS_CATEGORY', None):
- checks.append(('(?P<category>.*)', subdir))
+ if settings.get("USE_FOLDER_AS_CATEGORY", None):
+ checks.append(("(?P.*)", subdir))
for regexp, data in checks:
if regexp and data:
match = re.match(regexp, data)
diff --git a/pelican/rstdirectives.py b/pelican/rstdirectives.py
index 500c8578..0a549424 100644
--- a/pelican/rstdirectives.py
+++ b/pelican/rstdirectives.py
@@ -11,26 +11,26 @@ import pelican.settings as pys
class Pygments(Directive):
- """ Source code syntax highlighting.
- """
+ """Source code syntax highlighting."""
+
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
- 'anchorlinenos': directives.flag,
- 'classprefix': directives.unchanged,
- 'hl_lines': directives.unchanged,
- 'lineanchors': directives.unchanged,
- 'linenos': directives.unchanged,
- 'linenospecial': directives.nonnegative_int,
- 'linenostart': directives.nonnegative_int,
- 'linenostep': directives.nonnegative_int,
- 'lineseparator': directives.unchanged,
- 'linespans': directives.unchanged,
- 'nobackground': directives.flag,
- 'nowrap': directives.flag,
- 'tagsfile': directives.unchanged,
- 'tagurlformat': directives.unchanged,
+ "anchorlinenos": directives.flag,
+ "classprefix": directives.unchanged,
+ "hl_lines": directives.unchanged,
+ "lineanchors": directives.unchanged,
+ "linenos": directives.unchanged,
+ "linenospecial": directives.nonnegative_int,
+ "linenostart": directives.nonnegative_int,
+ "linenostep": directives.nonnegative_int,
+ "lineseparator": directives.unchanged,
+ "linespans": directives.unchanged,
+ "nobackground": directives.flag,
+ "nowrap": directives.flag,
+ "tagsfile": directives.unchanged,
+ "tagurlformat": directives.unchanged,
}
has_content = True
@@ -49,28 +49,30 @@ class Pygments(Directive):
if k not in self.options:
self.options[k] = v
- if ('linenos' in self.options and
- self.options['linenos'] not in ('table', 'inline')):
- if self.options['linenos'] == 'none':
- self.options.pop('linenos')
+ if "linenos" in self.options and self.options["linenos"] not in (
+ "table",
+ "inline",
+ ):
+ if self.options["linenos"] == "none":
+ self.options.pop("linenos")
else:
- self.options['linenos'] = 'table'
+ self.options["linenos"] = "table"
- for flag in ('nowrap', 'nobackground', 'anchorlinenos'):
+ for flag in ("nowrap", "nobackground", "anchorlinenos"):
if flag in self.options:
self.options[flag] = True
# noclasses should already default to False, but just in case...
formatter = HtmlFormatter(noclasses=False, **self.options)
- parsed = highlight('\n'.join(self.content), lexer, formatter)
- return [nodes.raw('', parsed, format='html')]
+ parsed = highlight("\n".join(self.content), lexer, formatter)
+ return [nodes.raw("", parsed, format="html")]
-directives.register_directive('code-block', Pygments)
-directives.register_directive('sourcecode', Pygments)
+directives.register_directive("code-block", Pygments)
+directives.register_directive("sourcecode", Pygments)
-_abbr_re = re.compile(r'\((.*)\)$', re.DOTALL)
+_abbr_re = re.compile(r"\((.*)\)$", re.DOTALL)
class abbreviation(nodes.Inline, nodes.TextElement):
@@ -82,9 +84,9 @@ def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
m = _abbr_re.search(text)
if m is None:
return [abbreviation(text, text)], []
- abbr = text[:m.start()].strip()
+ abbr = text[: m.start()].strip()
expl = m.group(1)
return [abbreviation(abbr, abbr, explanation=expl)], []
-roles.register_local_role('abbr', abbr_role)
+roles.register_local_role("abbr", abbr_role)
diff --git a/pelican/server.py b/pelican/server.py
index 913c3761..61729bf1 100644
--- a/pelican/server.py
+++ b/pelican/server.py
@@ -14,38 +14,47 @@ except ImportError:
from pelican.log import console # noqa: F401
from pelican.log import init as init_logging
+
logger = logging.getLogger(__name__)
def parse_arguments():
parser = argparse.ArgumentParser(
- description='Pelican Development Server',
- formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ description="Pelican Development Server",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ )
+ parser.add_argument(
+ "port", default=8000, type=int, nargs="?", help="Port to Listen On"
+ )
+ parser.add_argument("server", default="", nargs="?", help="Interface to Listen On")
+ parser.add_argument("--ssl", action="store_true", help="Activate SSL listener")
+ parser.add_argument(
+ "--cert",
+ default="./cert.pem",
+ nargs="?",
+ help="Path to certificate file. " + "Relative to current directory",
+ )
+ parser.add_argument(
+ "--key",
+ default="./key.pem",
+ nargs="?",
+ help="Path to certificate key file. " + "Relative to current directory",
+ )
+ parser.add_argument(
+ "--path",
+ default=".",
+ help="Path to pelican source directory to serve. "
+ + "Relative to current directory",
)
- parser.add_argument("port", default=8000, type=int, nargs="?",
- help="Port to Listen On")
- parser.add_argument("server", default="", nargs="?",
- help="Interface to Listen On")
- parser.add_argument('--ssl', action="store_true",
- help='Activate SSL listener')
- parser.add_argument('--cert', default="./cert.pem", nargs="?",
- help='Path to certificate file. ' +
- 'Relative to current directory')
- parser.add_argument('--key', default="./key.pem", nargs="?",
- help='Path to certificate key file. ' +
- 'Relative to current directory')
- parser.add_argument('--path', default=".",
- help='Path to pelican source directory to serve. ' +
- 'Relative to current directory')
return parser.parse_args()
class ComplexHTTPRequestHandler(server.SimpleHTTPRequestHandler):
- SUFFIXES = ['.html', '/index.html', '/', '']
+ SUFFIXES = [".html", "/index.html", "/", ""]
extensions_map = {
**server.SimpleHTTPRequestHandler.extensions_map,
- ** {
+ **{
# web fonts
".oft": "font/oft",
".sfnt": "font/sfnt",
@@ -57,13 +66,13 @@ class ComplexHTTPRequestHandler(server.SimpleHTTPRequestHandler):
def translate_path(self, path):
# abandon query parameters
- path = path.split('?', 1)[0]
- path = path.split('#', 1)[0]
+ path = path.split("?", 1)[0]
+ path = path.split("#", 1)[0]
# Don't forget explicit trailing slash when normalizing. Issue17324
- trailing_slash = path.rstrip().endswith('/')
+ trailing_slash = path.rstrip().endswith("/")
path = urllib.parse.unquote(path)
path = posixpath.normpath(path)
- words = path.split('/')
+ words = path.split("/")
words = filter(None, words)
path = self.base_path
for word in words:
@@ -72,12 +81,12 @@ class ComplexHTTPRequestHandler(server.SimpleHTTPRequestHandler):
continue
path = os.path.join(path, word)
if trailing_slash:
- path += '/'
+ path += "/"
return path
def do_GET(self):
# cut off a query string
- original_path = self.path.split('?', 1)[0]
+ original_path = self.path.split("?", 1)[0]
# try to find file
self.path = self.get_path_that_exists(original_path)
@@ -88,12 +97,12 @@ class ComplexHTTPRequestHandler(server.SimpleHTTPRequestHandler):
def get_path_that_exists(self, original_path):
# Try to strip trailing slash
- trailing_slash = original_path.endswith('/')
- original_path = original_path.rstrip('/')
+ trailing_slash = original_path.endswith("/")
+ original_path = original_path.rstrip("/")
# Try to detect file by applying various suffixes
tries = []
for suffix in self.SUFFIXES:
- if not trailing_slash and suffix == '/':
+ if not trailing_slash and suffix == "/":
# if original request does not have trailing slash, skip the '/' suffix
# so that base class can redirect if needed
continue
@@ -101,18 +110,17 @@ class ComplexHTTPRequestHandler(server.SimpleHTTPRequestHandler):
if os.path.exists(self.translate_path(path)):
return path
tries.append(path)
- logger.warning("Unable to find `%s` or variations:\n%s",
- original_path,
- '\n'.join(tries))
+ logger.warning(
+ "Unable to find `%s` or variations:\n%s", original_path, "\n".join(tries)
+ )
return None
def guess_type(self, path):
- """Guess at the mime type for the specified file.
- """
+ """Guess at the mime type for the specified file."""
mimetype = server.SimpleHTTPRequestHandler.guess_type(self, path)
# If the default guess is too generic, try the python-magic library
- if mimetype == 'application/octet-stream' and magic_from_file:
+ if mimetype == "application/octet-stream" and magic_from_file:
mimetype = magic_from_file(path, mime=True)
return mimetype
@@ -127,31 +135,33 @@ class RootedHTTPServer(server.HTTPServer):
self.RequestHandlerClass.base_path = base_path
-if __name__ == '__main__':
+if __name__ == "__main__":
init_logging(level=logging.INFO)
- logger.warning("'python -m pelican.server' is deprecated.\nThe "
- "Pelican development server should be run via "
- "'pelican --listen' or 'pelican -l'.\nThis can be combined "
- "with regeneration as 'pelican -lr'.\nRerun 'pelican-"
- "quickstart' to get new Makefile and tasks.py files.")
+ logger.warning(
+ "'python -m pelican.server' is deprecated.\nThe "
+ "Pelican development server should be run via "
+ "'pelican --listen' or 'pelican -l'.\nThis can be combined "
+ "with regeneration as 'pelican -lr'.\nRerun 'pelican-"
+ "quickstart' to get new Makefile and tasks.py files."
+ )
args = parse_arguments()
RootedHTTPServer.allow_reuse_address = True
try:
httpd = RootedHTTPServer(
- args.path, (args.server, args.port), ComplexHTTPRequestHandler)
+ args.path, (args.server, args.port), ComplexHTTPRequestHandler
+ )
if args.ssl:
httpd.socket = ssl.wrap_socket(
- httpd.socket, keyfile=args.key,
- certfile=args.cert, server_side=True)
+ httpd.socket, keyfile=args.key, certfile=args.cert, server_side=True
+ )
except ssl.SSLError as e:
- logger.error("Couldn't open certificate file %s or key file %s",
- args.cert, args.key)
- logger.error("Could not listen on port %s, server %s.",
- args.port, args.server)
- sys.exit(getattr(e, 'exitcode', 1))
+ logger.error(
+ "Couldn't open certificate file %s or key file %s", args.cert, args.key
+ )
+ logger.error("Could not listen on port %s, server %s.", args.port, args.server)
+ sys.exit(getattr(e, "exitcode", 1))
- logger.info("Serving at port %s, server %s.",
- args.port, args.server)
+ logger.info("Serving at port %s, server %s.", args.port, args.server)
try:
httpd.serve_forever()
except KeyboardInterrupt:
diff --git a/pelican/settings.py b/pelican/settings.py
index 9a54b2a6..2c84b6f0 100644
--- a/pelican/settings.py
+++ b/pelican/settings.py
@@ -22,150 +22,157 @@ def load_source(name, path):
logger = logging.getLogger(__name__)
-DEFAULT_THEME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
- 'themes', 'notmyidea')
+DEFAULT_THEME = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "themes", "notmyidea"
+)
DEFAULT_CONFIG = {
- 'PATH': os.curdir,
- 'ARTICLE_PATHS': [''],
- 'ARTICLE_EXCLUDES': [],
- 'PAGE_PATHS': ['pages'],
- 'PAGE_EXCLUDES': [],
- 'THEME': DEFAULT_THEME,
- 'OUTPUT_PATH': 'output',
- 'READERS': {},
- 'STATIC_PATHS': ['images'],
- 'STATIC_EXCLUDES': [],
- 'STATIC_EXCLUDE_SOURCES': True,
- 'THEME_STATIC_DIR': 'theme',
- 'THEME_STATIC_PATHS': ['static', ],
- 'FEED_ALL_ATOM': 'feeds/all.atom.xml',
- 'CATEGORY_FEED_ATOM': 'feeds/{slug}.atom.xml',
- 'AUTHOR_FEED_ATOM': 'feeds/{slug}.atom.xml',
- 'AUTHOR_FEED_RSS': 'feeds/{slug}.rss.xml',
- 'TRANSLATION_FEED_ATOM': 'feeds/all-{lang}.atom.xml',
- 'FEED_MAX_ITEMS': 100,
- 'RSS_FEED_SUMMARY_ONLY': True,
- 'SITEURL': '',
- 'SITENAME': 'A Pelican Blog',
- 'DISPLAY_PAGES_ON_MENU': True,
- 'DISPLAY_CATEGORIES_ON_MENU': True,
- 'DOCUTILS_SETTINGS': {},
- 'OUTPUT_SOURCES': False,
- 'OUTPUT_SOURCES_EXTENSION': '.text',
- 'USE_FOLDER_AS_CATEGORY': True,
- 'DEFAULT_CATEGORY': 'misc',
- 'WITH_FUTURE_DATES': True,
- 'CSS_FILE': 'main.css',
- 'NEWEST_FIRST_ARCHIVES': True,
- 'REVERSE_CATEGORY_ORDER': False,
- 'DELETE_OUTPUT_DIRECTORY': False,
- 'OUTPUT_RETENTION': [],
- 'INDEX_SAVE_AS': 'index.html',
- 'ARTICLE_URL': '{slug}.html',
- 'ARTICLE_SAVE_AS': '{slug}.html',
- 'ARTICLE_ORDER_BY': 'reversed-date',
- 'ARTICLE_LANG_URL': '{slug}-{lang}.html',
- 'ARTICLE_LANG_SAVE_AS': '{slug}-{lang}.html',
- 'DRAFT_URL': 'drafts/{slug}.html',
- 'DRAFT_SAVE_AS': 'drafts/{slug}.html',
- 'DRAFT_LANG_URL': 'drafts/{slug}-{lang}.html',
- 'DRAFT_LANG_SAVE_AS': 'drafts/{slug}-{lang}.html',
- 'PAGE_URL': 'pages/{slug}.html',
- 'PAGE_SAVE_AS': 'pages/{slug}.html',
- 'PAGE_ORDER_BY': 'basename',
- 'PAGE_LANG_URL': 'pages/{slug}-{lang}.html',
- 'PAGE_LANG_SAVE_AS': 'pages/{slug}-{lang}.html',
- 'DRAFT_PAGE_URL': 'drafts/pages/{slug}.html',
- 'DRAFT_PAGE_SAVE_AS': 'drafts/pages/{slug}.html',
- 'DRAFT_PAGE_LANG_URL': 'drafts/pages/{slug}-{lang}.html',
- 'DRAFT_PAGE_LANG_SAVE_AS': 'drafts/pages/{slug}-{lang}.html',
- 'STATIC_URL': '{path}',
- 'STATIC_SAVE_AS': '{path}',
- 'STATIC_CREATE_LINKS': False,
- 'STATIC_CHECK_IF_MODIFIED': False,
- 'CATEGORY_URL': 'category/{slug}.html',
- 'CATEGORY_SAVE_AS': 'category/{slug}.html',
- 'TAG_URL': 'tag/{slug}.html',
- 'TAG_SAVE_AS': 'tag/{slug}.html',
- 'AUTHOR_URL': 'author/{slug}.html',
- 'AUTHOR_SAVE_AS': 'author/{slug}.html',
- 'PAGINATION_PATTERNS': [
- (1, '{name}{extension}', '{name}{extension}'),
- (2, '{name}{number}{extension}', '{name}{number}{extension}'),
+ "PATH": os.curdir,
+ "ARTICLE_PATHS": [""],
+ "ARTICLE_EXCLUDES": [],
+ "PAGE_PATHS": ["pages"],
+ "PAGE_EXCLUDES": [],
+ "THEME": DEFAULT_THEME,
+ "OUTPUT_PATH": "output",
+ "READERS": {},
+ "STATIC_PATHS": ["images"],
+ "STATIC_EXCLUDES": [],
+ "STATIC_EXCLUDE_SOURCES": True,
+ "THEME_STATIC_DIR": "theme",
+ "THEME_STATIC_PATHS": [
+ "static",
],
- 'YEAR_ARCHIVE_URL': '',
- 'YEAR_ARCHIVE_SAVE_AS': '',
- 'MONTH_ARCHIVE_URL': '',
- 'MONTH_ARCHIVE_SAVE_AS': '',
- 'DAY_ARCHIVE_URL': '',
- 'DAY_ARCHIVE_SAVE_AS': '',
- 'RELATIVE_URLS': False,
- 'DEFAULT_LANG': 'en',
- 'ARTICLE_TRANSLATION_ID': 'slug',
- 'PAGE_TRANSLATION_ID': 'slug',
- 'DIRECT_TEMPLATES': ['index', 'tags', 'categories', 'authors', 'archives'],
- 'THEME_TEMPLATES_OVERRIDES': [],
- 'PAGINATED_TEMPLATES': {'index': None, 'tag': None, 'category': None,
- 'author': None},
- 'PELICAN_CLASS': 'pelican.Pelican',
- 'DEFAULT_DATE_FORMAT': '%a %d %B %Y',
- 'DATE_FORMATS': {},
- 'MARKDOWN': {
- 'extension_configs': {
- 'markdown.extensions.codehilite': {'css_class': 'highlight'},
- 'markdown.extensions.extra': {},
- 'markdown.extensions.meta': {},
+ "FEED_ALL_ATOM": "feeds/all.atom.xml",
+ "CATEGORY_FEED_ATOM": "feeds/{slug}.atom.xml",
+ "AUTHOR_FEED_ATOM": "feeds/{slug}.atom.xml",
+ "AUTHOR_FEED_RSS": "feeds/{slug}.rss.xml",
+ "TRANSLATION_FEED_ATOM": "feeds/all-{lang}.atom.xml",
+ "FEED_MAX_ITEMS": 100,
+ "RSS_FEED_SUMMARY_ONLY": True,
+ "SITEURL": "",
+ "SITENAME": "A Pelican Blog",
+ "DISPLAY_PAGES_ON_MENU": True,
+ "DISPLAY_CATEGORIES_ON_MENU": True,
+ "DOCUTILS_SETTINGS": {},
+ "OUTPUT_SOURCES": False,
+ "OUTPUT_SOURCES_EXTENSION": ".text",
+ "USE_FOLDER_AS_CATEGORY": True,
+ "DEFAULT_CATEGORY": "misc",
+ "WITH_FUTURE_DATES": True,
+ "CSS_FILE": "main.css",
+ "NEWEST_FIRST_ARCHIVES": True,
+ "REVERSE_CATEGORY_ORDER": False,
+ "DELETE_OUTPUT_DIRECTORY": False,
+ "OUTPUT_RETENTION": [],
+ "INDEX_SAVE_AS": "index.html",
+ "ARTICLE_URL": "{slug}.html",
+ "ARTICLE_SAVE_AS": "{slug}.html",
+ "ARTICLE_ORDER_BY": "reversed-date",
+ "ARTICLE_LANG_URL": "{slug}-{lang}.html",
+ "ARTICLE_LANG_SAVE_AS": "{slug}-{lang}.html",
+ "DRAFT_URL": "drafts/{slug}.html",
+ "DRAFT_SAVE_AS": "drafts/{slug}.html",
+ "DRAFT_LANG_URL": "drafts/{slug}-{lang}.html",
+ "DRAFT_LANG_SAVE_AS": "drafts/{slug}-{lang}.html",
+ "PAGE_URL": "pages/{slug}.html",
+ "PAGE_SAVE_AS": "pages/{slug}.html",
+ "PAGE_ORDER_BY": "basename",
+ "PAGE_LANG_URL": "pages/{slug}-{lang}.html",
+ "PAGE_LANG_SAVE_AS": "pages/{slug}-{lang}.html",
+ "DRAFT_PAGE_URL": "drafts/pages/{slug}.html",
+ "DRAFT_PAGE_SAVE_AS": "drafts/pages/{slug}.html",
+ "DRAFT_PAGE_LANG_URL": "drafts/pages/{slug}-{lang}.html",
+ "DRAFT_PAGE_LANG_SAVE_AS": "drafts/pages/{slug}-{lang}.html",
+ "STATIC_URL": "{path}",
+ "STATIC_SAVE_AS": "{path}",
+ "STATIC_CREATE_LINKS": False,
+ "STATIC_CHECK_IF_MODIFIED": False,
+ "CATEGORY_URL": "category/{slug}.html",
+ "CATEGORY_SAVE_AS": "category/{slug}.html",
+ "TAG_URL": "tag/{slug}.html",
+ "TAG_SAVE_AS": "tag/{slug}.html",
+ "AUTHOR_URL": "author/{slug}.html",
+ "AUTHOR_SAVE_AS": "author/{slug}.html",
+ "PAGINATION_PATTERNS": [
+ (1, "{name}{extension}", "{name}{extension}"),
+ (2, "{name}{number}{extension}", "{name}{number}{extension}"),
+ ],
+ "YEAR_ARCHIVE_URL": "",
+ "YEAR_ARCHIVE_SAVE_AS": "",
+ "MONTH_ARCHIVE_URL": "",
+ "MONTH_ARCHIVE_SAVE_AS": "",
+ "DAY_ARCHIVE_URL": "",
+ "DAY_ARCHIVE_SAVE_AS": "",
+ "RELATIVE_URLS": False,
+ "DEFAULT_LANG": "en",
+ "ARTICLE_TRANSLATION_ID": "slug",
+ "PAGE_TRANSLATION_ID": "slug",
+ "DIRECT_TEMPLATES": ["index", "tags", "categories", "authors", "archives"],
+ "THEME_TEMPLATES_OVERRIDES": [],
+ "PAGINATED_TEMPLATES": {
+ "index": None,
+ "tag": None,
+ "category": None,
+ "author": None,
+ },
+ "PELICAN_CLASS": "pelican.Pelican",
+ "DEFAULT_DATE_FORMAT": "%a %d %B %Y",
+ "DATE_FORMATS": {},
+ "MARKDOWN": {
+ "extension_configs": {
+ "markdown.extensions.codehilite": {"css_class": "highlight"},
+ "markdown.extensions.extra": {},
+ "markdown.extensions.meta": {},
},
- 'output_format': 'html5',
+ "output_format": "html5",
},
- 'JINJA_FILTERS': {},
- 'JINJA_GLOBALS': {},
- 'JINJA_TESTS': {},
- 'JINJA_ENVIRONMENT': {
- 'trim_blocks': True,
- 'lstrip_blocks': True,
- 'extensions': [],
+ "JINJA_FILTERS": {},
+ "JINJA_GLOBALS": {},
+ "JINJA_TESTS": {},
+ "JINJA_ENVIRONMENT": {
+ "trim_blocks": True,
+ "lstrip_blocks": True,
+ "extensions": [],
},
- 'LOG_FILTER': [],
- 'LOCALE': [''], # defaults to user locale
- 'DEFAULT_PAGINATION': False,
- 'DEFAULT_ORPHANS': 0,
- 'DEFAULT_METADATA': {},
- 'FILENAME_METADATA': r'(?P<date>\d{4}-\d{2}-\d{2}).*',
- 'PATH_METADATA': '',
- 'EXTRA_PATH_METADATA': {},
- 'ARTICLE_PERMALINK_STRUCTURE': '',
- 'TYPOGRIFY': False,
- 'TYPOGRIFY_IGNORE_TAGS': [],
- 'TYPOGRIFY_DASHES': 'default',
- 'SUMMARY_END_SUFFIX': '…',
- 'SUMMARY_MAX_LENGTH': 50,
- 'PLUGIN_PATHS': [],
- 'PLUGINS': None,
- 'PYGMENTS_RST_OPTIONS': {},
- 'TEMPLATE_PAGES': {},
- 'TEMPLATE_EXTENSIONS': ['.html'],
- 'IGNORE_FILES': ['.#*'],
- 'SLUG_REGEX_SUBSTITUTIONS': [
- (r'[^\w\s-]', ''), # remove non-alphabetical/whitespace/'-' chars
- (r'(?u)\A\s*', ''), # strip leading whitespace
- (r'(?u)\s*\Z', ''), # strip trailing whitespace
- (r'[-\s]+', '-'), # reduce multiple whitespace or '-' to single '-'
+ "LOG_FILTER": [],
+ "LOCALE": [""], # defaults to user locale
+ "DEFAULT_PAGINATION": False,
+ "DEFAULT_ORPHANS": 0,
+ "DEFAULT_METADATA": {},
+ "FILENAME_METADATA": r"(?P\d{4}-\d{2}-\d{2}).*",
+ "PATH_METADATA": "",
+ "EXTRA_PATH_METADATA": {},
+ "ARTICLE_PERMALINK_STRUCTURE": "",
+ "TYPOGRIFY": False,
+ "TYPOGRIFY_IGNORE_TAGS": [],
+ "TYPOGRIFY_DASHES": "default",
+ "SUMMARY_END_SUFFIX": "…",
+ "SUMMARY_MAX_LENGTH": 50,
+ "PLUGIN_PATHS": [],
+ "PLUGINS": None,
+ "PYGMENTS_RST_OPTIONS": {},
+ "TEMPLATE_PAGES": {},
+ "TEMPLATE_EXTENSIONS": [".html"],
+ "IGNORE_FILES": [".#*"],
+ "SLUG_REGEX_SUBSTITUTIONS": [
+ (r"[^\w\s-]", ""), # remove non-alphabetical/whitespace/'-' chars
+ (r"(?u)\A\s*", ""), # strip leading whitespace
+ (r"(?u)\s*\Z", ""), # strip trailing whitespace
+ (r"[-\s]+", "-"), # reduce multiple whitespace or '-' to single '-'
],
- 'INTRASITE_LINK_REGEX': '[{|](?P<path>.*?)[|}]',
- 'SLUGIFY_SOURCE': 'title',
- 'SLUGIFY_USE_UNICODE': False,
- 'SLUGIFY_PRESERVE_CASE': False,
- 'CACHE_CONTENT': False,
- 'CONTENT_CACHING_LAYER': 'reader',
- 'CACHE_PATH': 'cache',
- 'GZIP_CACHE': True,
- 'CHECK_MODIFIED_METHOD': 'mtime',
- 'LOAD_CONTENT_CACHE': False,
- 'WRITE_SELECTED': [],
- 'FORMATTED_FIELDS': ['summary'],
- 'PORT': 8000,
- 'BIND': '127.0.0.1',
+ "INTRASITE_LINK_REGEX": "[{|](?P.*?)[|}]",
+ "SLUGIFY_SOURCE": "title",
+ "SLUGIFY_USE_UNICODE": False,
+ "SLUGIFY_PRESERVE_CASE": False,
+ "CACHE_CONTENT": False,
+ "CONTENT_CACHING_LAYER": "reader",
+ "CACHE_PATH": "cache",
+ "GZIP_CACHE": True,
+ "CHECK_MODIFIED_METHOD": "mtime",
+ "LOAD_CONTENT_CACHE": False,
+ "WRITE_SELECTED": [],
+ "FORMATTED_FIELDS": ["summary"],
+ "PORT": 8000,
+ "BIND": "127.0.0.1",
}
PYGMENTS_RST_OPTIONS = None
@@ -185,20 +192,23 @@ def read_settings(path=None, override=None):
def getabs(maybe_relative, base_path=path):
if isabs(maybe_relative):
return maybe_relative
- return os.path.abspath(os.path.normpath(os.path.join(
- os.path.dirname(base_path), maybe_relative)))
+ return os.path.abspath(
+ os.path.normpath(
+ os.path.join(os.path.dirname(base_path), maybe_relative)
+ )
+ )
- for p in ['PATH', 'OUTPUT_PATH', 'THEME', 'CACHE_PATH']:
+ for p in ["PATH", "OUTPUT_PATH", "THEME", "CACHE_PATH"]:
if settings.get(p) is not None:
absp = getabs(settings[p])
# THEME may be a name rather than a path
- if p != 'THEME' or os.path.exists(absp):
+ if p != "THEME" or os.path.exists(absp):
settings[p] = absp
- if settings.get('PLUGIN_PATHS') is not None:
- settings['PLUGIN_PATHS'] = [getabs(pluginpath)
- for pluginpath
- in settings['PLUGIN_PATHS']]
+ if settings.get("PLUGIN_PATHS") is not None:
+ settings["PLUGIN_PATHS"] = [
+ getabs(pluginpath) for pluginpath in settings["PLUGIN_PATHS"]
+ ]
settings = dict(copy.deepcopy(DEFAULT_CONFIG), **settings)
settings = configure_settings(settings)
@@ -208,7 +218,7 @@ def read_settings(path=None, override=None):
# variable here that we'll import from within Pygments.run (see
# rstdirectives.py) to see what the user defaults were.
global PYGMENTS_RST_OPTIONS
- PYGMENTS_RST_OPTIONS = settings.get('PYGMENTS_RST_OPTIONS', None)
+ PYGMENTS_RST_OPTIONS = settings.get("PYGMENTS_RST_OPTIONS", None)
return settings
@@ -217,8 +227,7 @@ def get_settings_from_module(module=None):
context = {}
if module is not None:
- context.update(
- (k, v) for k, v in inspect.getmembers(module) if k.isupper())
+ context.update((k, v) for k, v in inspect.getmembers(module) if k.isupper())
return context
@@ -233,11 +242,12 @@ def get_settings_from_file(path):
def get_jinja_environment(settings):
"""Sets the environment for Jinja"""
- jinja_env = settings.setdefault('JINJA_ENVIRONMENT',
- DEFAULT_CONFIG['JINJA_ENVIRONMENT'])
+ jinja_env = settings.setdefault(
+ "JINJA_ENVIRONMENT", DEFAULT_CONFIG["JINJA_ENVIRONMENT"]
+ )
# Make sure we include the defaults if the user has set env variables
- for key, value in DEFAULT_CONFIG['JINJA_ENVIRONMENT'].items():
+ for key, value in DEFAULT_CONFIG["JINJA_ENVIRONMENT"].items():
if key not in jinja_env:
jinja_env[key] = value
@@ -248,14 +258,14 @@ def _printf_s_to_format_field(printf_string, format_field):
"""Tries to replace %s with {format_field} in the provided printf_string.
Raises ValueError in case of failure.
"""
- TEST_STRING = 'PELICAN_PRINTF_S_DEPRECATION'
+ TEST_STRING = "PELICAN_PRINTF_S_DEPRECATION"
expected = printf_string % TEST_STRING
- result = printf_string.replace('{', '{{').replace('}', '}}') \
- % '{{{}}}'.format(format_field)
+ result = printf_string.replace("{", "{{").replace("}", "}}") % "{{{}}}".format(
+ format_field
+ )
if result.format(**{format_field: TEST_STRING}) != expected:
- raise ValueError('Failed to safely replace %s with {{{}}}'.format(
- format_field))
+ raise ValueError("Failed to safely replace %s with {{{}}}".format(format_field))
return result
@@ -266,115 +276,140 @@ def handle_deprecated_settings(settings):
"""
# PLUGIN_PATH -> PLUGIN_PATHS
- if 'PLUGIN_PATH' in settings:
- logger.warning('PLUGIN_PATH setting has been replaced by '
- 'PLUGIN_PATHS, moving it to the new setting name.')
- settings['PLUGIN_PATHS'] = settings['PLUGIN_PATH']
- del settings['PLUGIN_PATH']
+ if "PLUGIN_PATH" in settings:
+ logger.warning(
+ "PLUGIN_PATH setting has been replaced by "
+ "PLUGIN_PATHS, moving it to the new setting name."
+ )
+ settings["PLUGIN_PATHS"] = settings["PLUGIN_PATH"]
+ del settings["PLUGIN_PATH"]
# PLUGIN_PATHS: str -> [str]
- if isinstance(settings.get('PLUGIN_PATHS'), str):
- logger.warning("Defining PLUGIN_PATHS setting as string "
- "has been deprecated (should be a list)")
- settings['PLUGIN_PATHS'] = [settings['PLUGIN_PATHS']]
+ if isinstance(settings.get("PLUGIN_PATHS"), str):
+ logger.warning(
+ "Defining PLUGIN_PATHS setting as string "
+ "has been deprecated (should be a list)"
+ )
+ settings["PLUGIN_PATHS"] = [settings["PLUGIN_PATHS"]]
# JINJA_EXTENSIONS -> JINJA_ENVIRONMENT > extensions
- if 'JINJA_EXTENSIONS' in settings:
- logger.warning('JINJA_EXTENSIONS setting has been deprecated, '
- 'moving it to JINJA_ENVIRONMENT setting.')
- settings['JINJA_ENVIRONMENT']['extensions'] = \
- settings['JINJA_EXTENSIONS']
- del settings['JINJA_EXTENSIONS']
+ if "JINJA_EXTENSIONS" in settings:
+ logger.warning(
+ "JINJA_EXTENSIONS setting has been deprecated, "
+ "moving it to JINJA_ENVIRONMENT setting."
+ )
+ settings["JINJA_ENVIRONMENT"]["extensions"] = settings["JINJA_EXTENSIONS"]
+ del settings["JINJA_EXTENSIONS"]
# {ARTICLE,PAGE}_DIR -> {ARTICLE,PAGE}_PATHS
- for key in ['ARTICLE', 'PAGE']:
- old_key = key + '_DIR'
- new_key = key + '_PATHS'
+ for key in ["ARTICLE", "PAGE"]:
+ old_key = key + "_DIR"
+ new_key = key + "_PATHS"
if old_key in settings:
logger.warning(
- 'Deprecated setting %s, moving it to %s list',
- old_key, new_key)
- settings[new_key] = [settings[old_key]] # also make a list
+ "Deprecated setting %s, moving it to %s list", old_key, new_key
+ )
+ settings[new_key] = [settings[old_key]] # also make a list
del settings[old_key]
# EXTRA_TEMPLATES_PATHS -> THEME_TEMPLATES_OVERRIDES
- if 'EXTRA_TEMPLATES_PATHS' in settings:
- logger.warning('EXTRA_TEMPLATES_PATHS is deprecated use '
- 'THEME_TEMPLATES_OVERRIDES instead.')
- if ('THEME_TEMPLATES_OVERRIDES' in settings and
- settings['THEME_TEMPLATES_OVERRIDES']):
+ if "EXTRA_TEMPLATES_PATHS" in settings:
+ logger.warning(
+ "EXTRA_TEMPLATES_PATHS is deprecated use "
+ "THEME_TEMPLATES_OVERRIDES instead."
+ )
+ if (
+ "THEME_TEMPLATES_OVERRIDES" in settings
+ and settings["THEME_TEMPLATES_OVERRIDES"]
+ ):
raise Exception(
- 'Setting both EXTRA_TEMPLATES_PATHS and '
- 'THEME_TEMPLATES_OVERRIDES is not permitted. Please move to '
- 'only setting THEME_TEMPLATES_OVERRIDES.')
- settings['THEME_TEMPLATES_OVERRIDES'] = \
- settings['EXTRA_TEMPLATES_PATHS']
- del settings['EXTRA_TEMPLATES_PATHS']
+ "Setting both EXTRA_TEMPLATES_PATHS and "
+ "THEME_TEMPLATES_OVERRIDES is not permitted. Please move to "
+ "only setting THEME_TEMPLATES_OVERRIDES."
+ )
+ settings["THEME_TEMPLATES_OVERRIDES"] = settings["EXTRA_TEMPLATES_PATHS"]
+ del settings["EXTRA_TEMPLATES_PATHS"]
# MD_EXTENSIONS -> MARKDOWN
- if 'MD_EXTENSIONS' in settings:
- logger.warning('MD_EXTENSIONS is deprecated use MARKDOWN '
- 'instead. Falling back to the default.')
- settings['MARKDOWN'] = DEFAULT_CONFIG['MARKDOWN']
+ if "MD_EXTENSIONS" in settings:
+ logger.warning(
+ "MD_EXTENSIONS is deprecated use MARKDOWN "
+ "instead. Falling back to the default."
+ )
+ settings["MARKDOWN"] = DEFAULT_CONFIG["MARKDOWN"]
# LESS_GENERATOR -> Webassets plugin
# FILES_TO_COPY -> STATIC_PATHS, EXTRA_PATH_METADATA
for old, new, doc in [
- ('LESS_GENERATOR', 'the Webassets plugin', None),
- ('FILES_TO_COPY', 'STATIC_PATHS and EXTRA_PATH_METADATA',
- 'https://github.com/getpelican/pelican/'
- 'blob/master/docs/settings.rst#path-metadata'),
+ ("LESS_GENERATOR", "the Webassets plugin", None),
+ (
+ "FILES_TO_COPY",
+ "STATIC_PATHS and EXTRA_PATH_METADATA",
+ "https://github.com/getpelican/pelican/"
+ "blob/master/docs/settings.rst#path-metadata",
+ ),
]:
if old in settings:
- message = 'The {} setting has been removed in favor of {}'.format(
- old, new)
+ message = "The {} setting has been removed in favor of {}".format(old, new)
if doc:
- message += ', see {} for details'.format(doc)
+ message += ", see {} for details".format(doc)
logger.warning(message)
# PAGINATED_DIRECT_TEMPLATES -> PAGINATED_TEMPLATES
- if 'PAGINATED_DIRECT_TEMPLATES' in settings:
- message = 'The {} setting has been removed in favor of {}'.format(
- 'PAGINATED_DIRECT_TEMPLATES', 'PAGINATED_TEMPLATES')
+ if "PAGINATED_DIRECT_TEMPLATES" in settings:
+ message = "The {} setting has been removed in favor of {}".format(
+ "PAGINATED_DIRECT_TEMPLATES", "PAGINATED_TEMPLATES"
+ )
logger.warning(message)
# set PAGINATED_TEMPLATES
- if 'PAGINATED_TEMPLATES' not in settings:
- settings['PAGINATED_TEMPLATES'] = {
- 'tag': None, 'category': None, 'author': None}
+ if "PAGINATED_TEMPLATES" not in settings:
+ settings["PAGINATED_TEMPLATES"] = {
+ "tag": None,
+ "category": None,
+ "author": None,
+ }
- for t in settings['PAGINATED_DIRECT_TEMPLATES']:
- if t not in settings['PAGINATED_TEMPLATES']:
- settings['PAGINATED_TEMPLATES'][t] = None
- del settings['PAGINATED_DIRECT_TEMPLATES']
+ for t in settings["PAGINATED_DIRECT_TEMPLATES"]:
+ if t not in settings["PAGINATED_TEMPLATES"]:
+ settings["PAGINATED_TEMPLATES"][t] = None
+ del settings["PAGINATED_DIRECT_TEMPLATES"]
# {SLUG,CATEGORY,TAG,AUTHOR}_SUBSTITUTIONS ->
# {SLUG,CATEGORY,TAG,AUTHOR}_REGEX_SUBSTITUTIONS
- url_settings_url = \
- 'http://docs.getpelican.com/en/latest/settings.html#url-settings'
- flavours = {'SLUG', 'CATEGORY', 'TAG', 'AUTHOR'}
- old_values = {f: settings[f + '_SUBSTITUTIONS']
- for f in flavours if f + '_SUBSTITUTIONS' in settings}
- new_values = {f: settings[f + '_REGEX_SUBSTITUTIONS']
- for f in flavours if f + '_REGEX_SUBSTITUTIONS' in settings}
+ url_settings_url = "http://docs.getpelican.com/en/latest/settings.html#url-settings"
+ flavours = {"SLUG", "CATEGORY", "TAG", "AUTHOR"}
+ old_values = {
+ f: settings[f + "_SUBSTITUTIONS"]
+ for f in flavours
+ if f + "_SUBSTITUTIONS" in settings
+ }
+ new_values = {
+ f: settings[f + "_REGEX_SUBSTITUTIONS"]
+ for f in flavours
+ if f + "_REGEX_SUBSTITUTIONS" in settings
+ }
if old_values and new_values:
raise Exception(
- 'Setting both {new_key} and {old_key} (or variants thereof) is '
- 'not permitted. Please move to only setting {new_key}.'
- .format(old_key='SLUG_SUBSTITUTIONS',
- new_key='SLUG_REGEX_SUBSTITUTIONS'))
+ "Setting both {new_key} and {old_key} (or variants thereof) is "
+ "not permitted. Please move to only setting {new_key}.".format(
+ old_key="SLUG_SUBSTITUTIONS", new_key="SLUG_REGEX_SUBSTITUTIONS"
+ )
+ )
if old_values:
- message = ('{} and variants thereof are deprecated and will be '
- 'removed in the future. Please use {} and variants thereof '
- 'instead. Check {}.'
- .format('SLUG_SUBSTITUTIONS', 'SLUG_REGEX_SUBSTITUTIONS',
- url_settings_url))
+ message = (
+ "{} and variants thereof are deprecated and will be "
+ "removed in the future. Please use {} and variants thereof "
+ "instead. Check {}.".format(
+ "SLUG_SUBSTITUTIONS", "SLUG_REGEX_SUBSTITUTIONS", url_settings_url
+ )
+ )
logger.warning(message)
- if old_values.get('SLUG'):
- for f in {'CATEGORY', 'TAG'}:
+ if old_values.get("SLUG"):
+ for f in {"CATEGORY", "TAG"}:
if old_values.get(f):
- old_values[f] = old_values['SLUG'] + old_values[f]
- old_values['AUTHOR'] = old_values.get('AUTHOR', [])
+ old_values[f] = old_values["SLUG"] + old_values[f]
+ old_values["AUTHOR"] = old_values.get("AUTHOR", [])
for f in flavours:
if old_values.get(f) is not None:
regex_subs = []
@@ -387,120 +422,138 @@ def handle_deprecated_settings(settings):
replace = False
except ValueError:
src, dst = tpl
- regex_subs.append(
- (re.escape(src), dst.replace('\\', r'\\')))
+ regex_subs.append((re.escape(src), dst.replace("\\", r"\\")))
if replace:
regex_subs += [
- (r'[^\w\s-]', ''),
- (r'(?u)\A\s*', ''),
- (r'(?u)\s*\Z', ''),
- (r'[-\s]+', '-'),
+ (r"[^\w\s-]", ""),
+ (r"(?u)\A\s*", ""),
+ (r"(?u)\s*\Z", ""),
+ (r"[-\s]+", "-"),
]
else:
regex_subs += [
- (r'(?u)\A\s*', ''),
- (r'(?u)\s*\Z', ''),
+ (r"(?u)\A\s*", ""),
+ (r"(?u)\s*\Z", ""),
]
- settings[f + '_REGEX_SUBSTITUTIONS'] = regex_subs
- settings.pop(f + '_SUBSTITUTIONS', None)
+ settings[f + "_REGEX_SUBSTITUTIONS"] = regex_subs
+ settings.pop(f + "_SUBSTITUTIONS", None)
# `%s` -> '{slug}` or `{lang}` in FEED settings
- for key in ['TRANSLATION_FEED_ATOM',
- 'TRANSLATION_FEED_RSS'
- ]:
+ for key in ["TRANSLATION_FEED_ATOM", "TRANSLATION_FEED_RSS"]:
if (
- settings.get(key) and not isinstance(settings[key], Path)
- and '%s' in settings[key]
+ settings.get(key)
+ and not isinstance(settings[key], Path)
+ and "%s" in settings[key]
):
- logger.warning('%%s usage in %s is deprecated, use {lang} '
- 'instead.', key)
+ logger.warning("%%s usage in %s is deprecated, use {lang} " "instead.", key)
try:
- settings[key] = _printf_s_to_format_field(
- settings[key], 'lang')
+ settings[key] = _printf_s_to_format_field(settings[key], "lang")
except ValueError:
- logger.warning('Failed to convert %%s to {lang} for %s. '
- 'Falling back to default.', key)
+ logger.warning(
+ "Failed to convert %%s to {lang} for %s. "
+ "Falling back to default.",
+ key,
+ )
settings[key] = DEFAULT_CONFIG[key]
- for key in ['AUTHOR_FEED_ATOM',
- 'AUTHOR_FEED_RSS',
- 'CATEGORY_FEED_ATOM',
- 'CATEGORY_FEED_RSS',
- 'TAG_FEED_ATOM',
- 'TAG_FEED_RSS',
- ]:
+ for key in [
+ "AUTHOR_FEED_ATOM",
+ "AUTHOR_FEED_RSS",
+ "CATEGORY_FEED_ATOM",
+ "CATEGORY_FEED_RSS",
+ "TAG_FEED_ATOM",
+ "TAG_FEED_RSS",
+ ]:
if (
- settings.get(key) and not isinstance(settings[key], Path)
- and '%s' in settings[key]
+ settings.get(key)
+ and not isinstance(settings[key], Path)
+ and "%s" in settings[key]
):
- logger.warning('%%s usage in %s is deprecated, use {slug} '
- 'instead.', key)
+ logger.warning("%%s usage in %s is deprecated, use {slug} " "instead.", key)
try:
- settings[key] = _printf_s_to_format_field(
- settings[key], 'slug')
+ settings[key] = _printf_s_to_format_field(settings[key], "slug")
except ValueError:
- logger.warning('Failed to convert %%s to {slug} for %s. '
- 'Falling back to default.', key)
+ logger.warning(
+ "Failed to convert %%s to {slug} for %s. "
+ "Falling back to default.",
+ key,
+ )
settings[key] = DEFAULT_CONFIG[key]
# CLEAN_URLS
- if settings.get('CLEAN_URLS', False):
- logger.warning('Found deprecated `CLEAN_URLS` in settings.'
- ' Modifying the following settings for the'
- ' same behaviour.')
+ if settings.get("CLEAN_URLS", False):
+ logger.warning(
+ "Found deprecated `CLEAN_URLS` in settings."
+ " Modifying the following settings for the"
+ " same behaviour."
+ )
- settings['ARTICLE_URL'] = '{slug}/'
- settings['ARTICLE_LANG_URL'] = '{slug}-{lang}/'
- settings['PAGE_URL'] = 'pages/{slug}/'
- settings['PAGE_LANG_URL'] = 'pages/{slug}-{lang}/'
+ settings["ARTICLE_URL"] = "{slug}/"
+ settings["ARTICLE_LANG_URL"] = "{slug}-{lang}/"
+ settings["PAGE_URL"] = "pages/{slug}/"
+ settings["PAGE_LANG_URL"] = "pages/{slug}-{lang}/"
- for setting in ('ARTICLE_URL', 'ARTICLE_LANG_URL', 'PAGE_URL',
- 'PAGE_LANG_URL'):
+ for setting in ("ARTICLE_URL", "ARTICLE_LANG_URL", "PAGE_URL", "PAGE_LANG_URL"):
logger.warning("%s = '%s'", setting, settings[setting])
# AUTORELOAD_IGNORE_CACHE -> --ignore-cache
- if settings.get('AUTORELOAD_IGNORE_CACHE'):
- logger.warning('Found deprecated `AUTORELOAD_IGNORE_CACHE` in '
- 'settings. Use --ignore-cache instead.')
- settings.pop('AUTORELOAD_IGNORE_CACHE')
+ if settings.get("AUTORELOAD_IGNORE_CACHE"):
+ logger.warning(
+ "Found deprecated `AUTORELOAD_IGNORE_CACHE` in "
+ "settings. Use --ignore-cache instead."
+ )
+ settings.pop("AUTORELOAD_IGNORE_CACHE")
# ARTICLE_PERMALINK_STRUCTURE
- if settings.get('ARTICLE_PERMALINK_STRUCTURE', False):
- logger.warning('Found deprecated `ARTICLE_PERMALINK_STRUCTURE` in'
- ' settings. Modifying the following settings for'
- ' the same behaviour.')
+ if settings.get("ARTICLE_PERMALINK_STRUCTURE", False):
+ logger.warning(
+ "Found deprecated `ARTICLE_PERMALINK_STRUCTURE` in"
+ " settings. Modifying the following settings for"
+ " the same behaviour."
+ )
- structure = settings['ARTICLE_PERMALINK_STRUCTURE']
+ structure = settings["ARTICLE_PERMALINK_STRUCTURE"]
# Convert %(variable) into {variable}.
- structure = re.sub(r'%\((\w+)\)s', r'{\g<1>}', structure)
+ structure = re.sub(r"%\((\w+)\)s", r"{\g<1>}", structure)
# Convert %x into {date:%x} for strftime
- structure = re.sub(r'(%[A-z])', r'{date:\g<1>}', structure)
+ structure = re.sub(r"(%[A-z])", r"{date:\g<1>}", structure)
# Strip a / prefix
- structure = re.sub('^/', '', structure)
+ structure = re.sub("^/", "", structure)
- for setting in ('ARTICLE_URL', 'ARTICLE_LANG_URL', 'PAGE_URL',
- 'PAGE_LANG_URL', 'DRAFT_URL', 'DRAFT_LANG_URL',
- 'ARTICLE_SAVE_AS', 'ARTICLE_LANG_SAVE_AS',
- 'DRAFT_SAVE_AS', 'DRAFT_LANG_SAVE_AS',
- 'PAGE_SAVE_AS', 'PAGE_LANG_SAVE_AS'):
- settings[setting] = os.path.join(structure,
- settings[setting])
+ for setting in (
+ "ARTICLE_URL",
+ "ARTICLE_LANG_URL",
+ "PAGE_URL",
+ "PAGE_LANG_URL",
+ "DRAFT_URL",
+ "DRAFT_LANG_URL",
+ "ARTICLE_SAVE_AS",
+ "ARTICLE_LANG_SAVE_AS",
+ "DRAFT_SAVE_AS",
+ "DRAFT_LANG_SAVE_AS",
+ "PAGE_SAVE_AS",
+ "PAGE_LANG_SAVE_AS",
+ ):
+ settings[setting] = os.path.join(structure, settings[setting])
logger.warning("%s = '%s'", setting, settings[setting])
# {,TAG,CATEGORY,TRANSLATION}_FEED -> {,TAG,CATEGORY,TRANSLATION}_FEED_ATOM
- for new, old in [('FEED', 'FEED_ATOM'), ('TAG_FEED', 'TAG_FEED_ATOM'),
- ('CATEGORY_FEED', 'CATEGORY_FEED_ATOM'),
- ('TRANSLATION_FEED', 'TRANSLATION_FEED_ATOM')]:
+ for new, old in [
+ ("FEED", "FEED_ATOM"),
+ ("TAG_FEED", "TAG_FEED_ATOM"),
+ ("CATEGORY_FEED", "CATEGORY_FEED_ATOM"),
+ ("TRANSLATION_FEED", "TRANSLATION_FEED_ATOM"),
+ ]:
if settings.get(new, False):
logger.warning(
- 'Found deprecated `%(new)s` in settings. Modify %(new)s '
- 'to %(old)s in your settings and theme for the same '
- 'behavior. Temporarily setting %(old)s for backwards '
- 'compatibility.',
- {'new': new, 'old': old}
+ "Found deprecated `%(new)s` in settings. Modify %(new)s "
+ "to %(old)s in your settings and theme for the same "
+ "behavior. Temporarily setting %(old)s for backwards "
+ "compatibility.",
+ {"new": new, "old": old},
)
settings[old] = settings[new]
@@ -512,34 +565,34 @@ def configure_settings(settings):
settings.
Also, specify the log messages to be ignored.
"""
- if 'PATH' not in settings or not os.path.isdir(settings['PATH']):
- raise Exception('You need to specify a path containing the content'
- ' (see pelican --help for more information)')
+ if "PATH" not in settings or not os.path.isdir(settings["PATH"]):
+ raise Exception(
+ "You need to specify a path containing the content"
+ " (see pelican --help for more information)"
+ )
# specify the log messages to be ignored
- log_filter = settings.get('LOG_FILTER', DEFAULT_CONFIG['LOG_FILTER'])
+ log_filter = settings.get("LOG_FILTER", DEFAULT_CONFIG["LOG_FILTER"])
LimitFilter._ignore.update(set(log_filter))
# lookup the theme in "pelican/themes" if the given one doesn't exist
- if not os.path.isdir(settings['THEME']):
+ if not os.path.isdir(settings["THEME"]):
theme_path = os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
- 'themes',
- settings['THEME'])
+ os.path.dirname(os.path.abspath(__file__)), "themes", settings["THEME"]
+ )
if os.path.exists(theme_path):
- settings['THEME'] = theme_path
+ settings["THEME"] = theme_path
else:
- raise Exception("Could not find the theme %s"
- % settings['THEME'])
+ raise Exception("Could not find the theme %s" % settings["THEME"])
# make paths selected for writing absolute if necessary
- settings['WRITE_SELECTED'] = [
- os.path.abspath(path) for path in
- settings.get('WRITE_SELECTED', DEFAULT_CONFIG['WRITE_SELECTED'])
+ settings["WRITE_SELECTED"] = [
+ os.path.abspath(path)
+ for path in settings.get("WRITE_SELECTED", DEFAULT_CONFIG["WRITE_SELECTED"])
]
# standardize strings to lowercase strings
- for key in ['DEFAULT_LANG']:
+ for key in ["DEFAULT_LANG"]:
if key in settings:
settings[key] = settings[key].lower()
@@ -547,24 +600,26 @@ def configure_settings(settings):
settings = get_jinja_environment(settings)
# standardize strings to lists
- for key in ['LOCALE']:
+ for key in ["LOCALE"]:
if key in settings and isinstance(settings[key], str):
settings[key] = [settings[key]]
# check settings that must be a particular type
for key, types in [
- ('OUTPUT_SOURCES_EXTENSION', str),
- ('FILENAME_METADATA', str),
+ ("OUTPUT_SOURCES_EXTENSION", str),
+ ("FILENAME_METADATA", str),
]:
if key in settings and not isinstance(settings[key], types):
value = settings.pop(key)
logger.warn(
- 'Detected misconfigured %s (%s), '
- 'falling back to the default (%s)',
- key, value, DEFAULT_CONFIG[key])
+ "Detected misconfigured %s (%s), " "falling back to the default (%s)",
+ key,
+ value,
+ DEFAULT_CONFIG[key],
+ )
# try to set the different locales, fallback on the default.
- locales = settings.get('LOCALE', DEFAULT_CONFIG['LOCALE'])
+ locales = settings.get("LOCALE", DEFAULT_CONFIG["LOCALE"])
for locale_ in locales:
try:
@@ -575,95 +630,111 @@ def configure_settings(settings):
else:
logger.warning(
"Locale could not be set. Check the LOCALE setting, ensuring it "
- "is valid and available on your system.")
+ "is valid and available on your system."
+ )
- if ('SITEURL' in settings):
+ if "SITEURL" in settings:
# If SITEURL has a trailing slash, remove it and provide a warning
- siteurl = settings['SITEURL']
- if (siteurl.endswith('/')):
- settings['SITEURL'] = siteurl[:-1]
+ siteurl = settings["SITEURL"]
+ if siteurl.endswith("/"):
+ settings["SITEURL"] = siteurl[:-1]
logger.warning("Removed extraneous trailing slash from SITEURL.")
# If SITEURL is defined but FEED_DOMAIN isn't,
# set FEED_DOMAIN to SITEURL
- if 'FEED_DOMAIN' not in settings:
- settings['FEED_DOMAIN'] = settings['SITEURL']
+ if "FEED_DOMAIN" not in settings:
+ settings["FEED_DOMAIN"] = settings["SITEURL"]
# check content caching layer and warn of incompatibilities
- if settings.get('CACHE_CONTENT', False) and \
- settings.get('CONTENT_CACHING_LAYER', '') == 'generator' and \
- not settings.get('WITH_FUTURE_DATES', True):
+ if (
+ settings.get("CACHE_CONTENT", False)
+ and settings.get("CONTENT_CACHING_LAYER", "") == "generator"
+ and not settings.get("WITH_FUTURE_DATES", True)
+ ):
logger.warning(
"WITH_FUTURE_DATES conflicts with CONTENT_CACHING_LAYER "
- "set to 'generator', use 'reader' layer instead")
+ "set to 'generator', use 'reader' layer instead"
+ )
# Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined
feed_keys = [
- 'FEED_ATOM', 'FEED_RSS',
- 'FEED_ALL_ATOM', 'FEED_ALL_RSS',
- 'CATEGORY_FEED_ATOM', 'CATEGORY_FEED_RSS',
- 'AUTHOR_FEED_ATOM', 'AUTHOR_FEED_RSS',
- 'TAG_FEED_ATOM', 'TAG_FEED_RSS',
- 'TRANSLATION_FEED_ATOM', 'TRANSLATION_FEED_RSS',
+ "FEED_ATOM",
+ "FEED_RSS",
+ "FEED_ALL_ATOM",
+ "FEED_ALL_RSS",
+ "CATEGORY_FEED_ATOM",
+ "CATEGORY_FEED_RSS",
+ "AUTHOR_FEED_ATOM",
+ "AUTHOR_FEED_RSS",
+ "TAG_FEED_ATOM",
+ "TAG_FEED_RSS",
+ "TRANSLATION_FEED_ATOM",
+ "TRANSLATION_FEED_RSS",
]
if any(settings.get(k) for k in feed_keys):
- if not settings.get('SITEURL'):
- logger.warning('Feeds generated without SITEURL set properly may'
- ' not be valid')
+ if not settings.get("SITEURL"):
+ logger.warning(
+ "Feeds generated without SITEURL set properly may" " not be valid"
+ )
- if 'TIMEZONE' not in settings:
+ if "TIMEZONE" not in settings:
logger.warning(
- 'No timezone information specified in the settings. Assuming'
- ' your timezone is UTC for feed generation. Check '
- 'https://docs.getpelican.com/en/latest/settings.html#TIMEZONE '
- 'for more information')
+ "No timezone information specified in the settings. Assuming"
+ " your timezone is UTC for feed generation. Check "
+ "https://docs.getpelican.com/en/latest/settings.html#TIMEZONE "
+ "for more information"
+ )
# fix up pagination rules
from pelican.paginator import PaginationRule
+
pagination_rules = [
- PaginationRule(*r) for r in settings.get(
- 'PAGINATION_PATTERNS',
- DEFAULT_CONFIG['PAGINATION_PATTERNS'],
+ PaginationRule(*r)
+ for r in settings.get(
+ "PAGINATION_PATTERNS",
+ DEFAULT_CONFIG["PAGINATION_PATTERNS"],
)
]
- settings['PAGINATION_PATTERNS'] = sorted(
+ settings["PAGINATION_PATTERNS"] = sorted(
pagination_rules,
key=lambda r: r[0],
)
# Save people from accidentally setting a string rather than a list
path_keys = (
- 'ARTICLE_EXCLUDES',
- 'DEFAULT_METADATA',
- 'DIRECT_TEMPLATES',
- 'THEME_TEMPLATES_OVERRIDES',
- 'FILES_TO_COPY',
- 'IGNORE_FILES',
- 'PAGINATED_DIRECT_TEMPLATES',
- 'PLUGINS',
- 'STATIC_EXCLUDES',
- 'STATIC_PATHS',
- 'THEME_STATIC_PATHS',
- 'ARTICLE_PATHS',
- 'PAGE_PATHS',
+ "ARTICLE_EXCLUDES",
+ "DEFAULT_METADATA",
+ "DIRECT_TEMPLATES",
+ "THEME_TEMPLATES_OVERRIDES",
+ "FILES_TO_COPY",
+ "IGNORE_FILES",
+ "PAGINATED_DIRECT_TEMPLATES",
+ "PLUGINS",
+ "STATIC_EXCLUDES",
+ "STATIC_PATHS",
+ "THEME_STATIC_PATHS",
+ "ARTICLE_PATHS",
+ "PAGE_PATHS",
)
for PATH_KEY in filter(lambda k: k in settings, path_keys):
if isinstance(settings[PATH_KEY], str):
- logger.warning("Detected misconfiguration with %s setting "
- "(must be a list), falling back to the default",
- PATH_KEY)
+ logger.warning(
+ "Detected misconfiguration with %s setting "
+ "(must be a list), falling back to the default",
+ PATH_KEY,
+ )
settings[PATH_KEY] = DEFAULT_CONFIG[PATH_KEY]
# Add {PAGE,ARTICLE}_PATHS to {ARTICLE,PAGE}_EXCLUDES
- mutually_exclusive = ('ARTICLE', 'PAGE')
+ mutually_exclusive = ("ARTICLE", "PAGE")
for type_1, type_2 in [mutually_exclusive, mutually_exclusive[::-1]]:
try:
- includes = settings[type_1 + '_PATHS']
- excludes = settings[type_2 + '_EXCLUDES']
+ includes = settings[type_1 + "_PATHS"]
+ excludes = settings[type_2 + "_EXCLUDES"]
for path in includes:
if path not in excludes:
excludes.append(path)
except KeyError:
- continue # setting not specified, nothing to do
+ continue # setting not specified, nothing to do
return settings
diff --git a/pelican/signals.py b/pelican/signals.py
index 9b84a92a..4d232e34 100644
--- a/pelican/signals.py
+++ b/pelican/signals.py
@@ -1,4 +1,4 @@
raise ImportError(
- 'Importing from `pelican.signals` is deprecated. '
- 'Use `from pelican import signals` or `import pelican.plugins.signals` instead.'
+ "Importing from `pelican.signals` is deprecated. "
+ "Use `from pelican import signals` or `import pelican.plugins.signals` instead."
)
diff --git a/pelican/tests/default_conf.py b/pelican/tests/default_conf.py
index 99f3b6cf..583c3253 100644
--- a/pelican/tests/default_conf.py
+++ b/pelican/tests/default_conf.py
@@ -1,43 +1,47 @@
-AUTHOR = 'Alexis Métaireau'
+AUTHOR = "Alexis Métaireau"
SITENAME = "Alexis' log"
-SITEURL = 'http://blog.notmyidea.org'
-TIMEZONE = 'UTC'
+SITEURL = "http://blog.notmyidea.org"
+TIMEZONE = "UTC"
-GITHUB_URL = 'http://github.com/ametaireau/'
+GITHUB_URL = "http://github.com/ametaireau/"
DISQUS_SITENAME = "blog-notmyidea"
PDF_GENERATOR = False
REVERSE_CATEGORY_ORDER = True
DEFAULT_PAGINATION = 2
-FEED_RSS = 'feeds/all.rss.xml'
-CATEGORY_FEED_RSS = 'feeds/{slug}.rss.xml'
+FEED_RSS = "feeds/all.rss.xml"
+CATEGORY_FEED_RSS = "feeds/{slug}.rss.xml"
-LINKS = (('Biologeek', 'http://biologeek.org'),
- ('Filyb', "http://filyb.info/"),
- ('Libert-fr', "http://www.libert-fr.com"),
- ('N1k0', "http://prendreuncafe.com/blog/"),
- ('Tarek Ziadé', "http://ziade.org/blog"),
- ('Zubin Mithra', "http://zubin71.wordpress.com/"),)
+LINKS = (
+ ("Biologeek", "http://biologeek.org"),
+ ("Filyb", "http://filyb.info/"),
+ ("Libert-fr", "http://www.libert-fr.com"),
+ ("N1k0", "http://prendreuncafe.com/blog/"),
+ ("Tarek Ziadé", "http://ziade.org/blog"),
+ ("Zubin Mithra", "http://zubin71.wordpress.com/"),
+)
-SOCIAL = (('twitter', 'http://twitter.com/ametaireau'),
- ('lastfm', 'http://lastfm.com/user/akounet'),
- ('github', 'http://github.com/ametaireau'),)
+SOCIAL = (
+ ("twitter", "http://twitter.com/ametaireau"),
+ ("lastfm", "http://lastfm.com/user/akounet"),
+ ("github", "http://github.com/ametaireau"),
+)
# global metadata to all the contents
-DEFAULT_METADATA = {'yeah': 'it is'}
+DEFAULT_METADATA = {"yeah": "it is"}
# path-specific metadata
EXTRA_PATH_METADATA = {
- 'extra/robots.txt': {'path': 'robots.txt'},
+ "extra/robots.txt": {"path": "robots.txt"},
}
# static paths will be copied without parsing their contents
STATIC_PATHS = [
- 'pictures',
- 'extra/robots.txt',
+ "pictures",
+ "extra/robots.txt",
]
-FORMATTED_FIELDS = ['summary', 'custom_formatted_field']
+FORMATTED_FIELDS = ["summary", "custom_formatted_field"]
# foobar will not be used, because it's not in caps. All configuration keys
# have to be in caps
diff --git a/pelican/tests/dummy_plugins/namespace_plugin/pelican/plugins/ns_plugin/__init__.py b/pelican/tests/dummy_plugins/namespace_plugin/pelican/plugins/ns_plugin/__init__.py
index c514861d..1979cf09 100644
--- a/pelican/tests/dummy_plugins/namespace_plugin/pelican/plugins/ns_plugin/__init__.py
+++ b/pelican/tests/dummy_plugins/namespace_plugin/pelican/plugins/ns_plugin/__init__.py
@@ -1,4 +1,4 @@
-NAME = 'namespace plugin'
+NAME = "namespace plugin"
def register():
diff --git a/pelican/tests/support.py b/pelican/tests/support.py
index 3e4da785..31b12ce8 100644
--- a/pelican/tests/support.py
+++ b/pelican/tests/support.py
@@ -16,7 +16,10 @@ from pelican.contents import Article
from pelican.readers import default_metadata
from pelican.settings import DEFAULT_CONFIG
-__all__ = ['get_article', 'unittest', ]
+__all__ = [
+ "get_article",
+ "unittest",
+]
@contextmanager
@@ -51,7 +54,7 @@ def isplit(s, sep=None):
True
"""
- sep, hardsep = r'\s+' if sep is None else re.escape(sep), sep is not None
+ sep, hardsep = r"\s+" if sep is None else re.escape(sep), sep is not None
exp, pos, length = re.compile(sep), 0, len(s)
while True:
m = exp.search(s, pos)
@@ -89,10 +92,8 @@ def mute(returns_output=False):
"""
def decorator(func):
-
@wraps(func)
def wrapper(*args, **kwargs):
-
saved_stdout = sys.stdout
sys.stdout = StringIO()
@@ -112,7 +113,7 @@ def mute(returns_output=False):
def get_article(title, content, **extra_metadata):
metadata = default_metadata(settings=DEFAULT_CONFIG)
- metadata['title'] = title
+ metadata["title"] = title
if extra_metadata:
metadata.update(extra_metadata)
return Article(content, metadata=metadata)
@@ -125,14 +126,14 @@ def skipIfNoExecutable(executable):
and skips the tests if not found (if subprocess raises a `OSError`).
"""
- with open(os.devnull, 'w') as fnull:
+ with open(os.devnull, "w") as fnull:
try:
res = subprocess.call(executable, stdout=fnull, stderr=fnull)
except OSError:
res = None
if res is None:
- return unittest.skip('{} executable not found'.format(executable))
+ return unittest.skip("{} executable not found".format(executable))
return lambda func: func
@@ -164,10 +165,7 @@ def can_symlink():
res = True
try:
with temporary_folder() as f:
- os.symlink(
- f,
- os.path.join(f, 'symlink')
- )
+ os.symlink(f, os.path.join(f, "symlink"))
except OSError:
res = False
return res
@@ -186,9 +184,9 @@ def get_settings(**kwargs):
def get_context(settings=None, **kwargs):
context = settings.copy() if settings else {}
- context['generated_content'] = {}
- context['static_links'] = set()
- context['static_content'] = {}
+ context["generated_content"] = {}
+ context["static_links"] = set()
+ context["static_content"] = {}
context.update(kwargs)
return context
@@ -200,22 +198,24 @@ class LogCountHandler(BufferingHandler):
super().__init__(capacity)
def count_logs(self, msg=None, level=None):
- return len([
- rec
- for rec
- in self.buffer
- if (msg is None or re.match(msg, rec.getMessage())) and
- (level is None or rec.levelno == level)
- ])
+ return len(
+ [
+ rec
+ for rec in self.buffer
+ if (msg is None or re.match(msg, rec.getMessage()))
+ and (level is None or rec.levelno == level)
+ ]
+ )
def count_formatted_logs(self, msg=None, level=None):
- return len([
- rec
- for rec
- in self.buffer
- if (msg is None or re.search(msg, self.format(rec))) and
- (level is None or rec.levelno == level)
- ])
+ return len(
+ [
+ rec
+ for rec in self.buffer
+ if (msg is None or re.search(msg, self.format(rec)))
+ and (level is None or rec.levelno == level)
+ ]
+ )
def diff_subproc(first, second):
@@ -228,8 +228,16 @@ def diff_subproc(first, second):
>>> didCheckFail = proc.returnCode != 0
"""
return subprocess.Popen(
- ['git', '--no-pager', 'diff', '--no-ext-diff', '--exit-code',
- '-w', first, second],
+ [
+ "git",
+ "--no-pager",
+ "diff",
+ "--no-ext-diff",
+ "--exit-code",
+ "-w",
+ first,
+ second,
+ ],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
@@ -251,9 +259,12 @@ class LoggedTestCase(unittest.TestCase):
def assertLogCountEqual(self, count=None, msg=None, **kwargs):
actual = self._logcount_handler.count_logs(msg=msg, **kwargs)
self.assertEqual(
- actual, count,
- msg='expected {} occurrences of {!r}, but found {}'.format(
- count, msg, actual))
+ actual,
+ count,
+ msg="expected {} occurrences of {!r}, but found {}".format(
+ count, msg, actual
+ ),
+ )
class TestCaseWithCLocale(unittest.TestCase):
@@ -261,9 +272,10 @@ class TestCaseWithCLocale(unittest.TestCase):
Use utils.temporary_locale if you want a context manager ("with" statement).
"""
+
def setUp(self):
self.old_locale = locale.setlocale(locale.LC_ALL)
- locale.setlocale(locale.LC_ALL, 'C')
+ locale.setlocale(locale.LC_ALL, "C")
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.old_locale)
diff --git a/pelican/tests/test_cache.py b/pelican/tests/test_cache.py
index 564f1d31..6dc91b2c 100644
--- a/pelican/tests/test_cache.py
+++ b/pelican/tests/test_cache.py
@@ -8,31 +8,30 @@ from pelican.tests.support import get_context, get_settings, unittest
CUR_DIR = os.path.dirname(__file__)
-CONTENT_DIR = os.path.join(CUR_DIR, 'content')
+CONTENT_DIR = os.path.join(CUR_DIR, "content")
class TestCache(unittest.TestCase):
-
def setUp(self):
- self.temp_cache = mkdtemp(prefix='pelican_cache.')
+ self.temp_cache = mkdtemp(prefix="pelican_cache.")
def tearDown(self):
rmtree(self.temp_cache)
def _get_cache_enabled_settings(self):
settings = get_settings()
- settings['CACHE_CONTENT'] = True
- settings['LOAD_CONTENT_CACHE'] = True
- settings['CACHE_PATH'] = self.temp_cache
+ settings["CACHE_CONTENT"] = True
+ settings["LOAD_CONTENT_CACHE"] = True
+ settings["CACHE_PATH"] = self.temp_cache
return settings
def test_generator_caching(self):
"""Test that cached and uncached content is same in generator level"""
settings = self._get_cache_enabled_settings()
- settings['CONTENT_CACHING_LAYER'] = 'generator'
- settings['PAGE_PATHS'] = ['TestPages']
- settings['DEFAULT_DATE'] = (1970, 1, 1)
- settings['READERS'] = {'asc': None}
+ settings["CONTENT_CACHING_LAYER"] = "generator"
+ settings["PAGE_PATHS"] = ["TestPages"]
+ settings["DEFAULT_DATE"] = (1970, 1, 1)
+ settings["READERS"] = {"asc": None}
context = get_context(settings)
def sorted_titles(items):
@@ -40,15 +39,23 @@ class TestCache(unittest.TestCase):
# Articles
generator = ArticlesGenerator(
- context=context.copy(), settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
uncached_articles = sorted_titles(generator.articles)
uncached_drafts = sorted_titles(generator.drafts)
generator = ArticlesGenerator(
- context=context.copy(), settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
cached_articles = sorted_titles(generator.articles)
cached_drafts = sorted_titles(generator.drafts)
@@ -58,16 +65,24 @@ class TestCache(unittest.TestCase):
# Pages
generator = PagesGenerator(
- context=context.copy(), settings=settings,
- path=CUR_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CUR_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
uncached_pages = sorted_titles(generator.pages)
uncached_hidden_pages = sorted_titles(generator.hidden_pages)
uncached_draft_pages = sorted_titles(generator.draft_pages)
generator = PagesGenerator(
- context=context.copy(), settings=settings,
- path=CUR_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CUR_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
cached_pages = sorted_titles(generator.pages)
cached_hidden_pages = sorted_titles(generator.hidden_pages)
@@ -80,10 +95,10 @@ class TestCache(unittest.TestCase):
def test_reader_caching(self):
"""Test that cached and uncached content is same in reader level"""
settings = self._get_cache_enabled_settings()
- settings['CONTENT_CACHING_LAYER'] = 'reader'
- settings['PAGE_PATHS'] = ['TestPages']
- settings['DEFAULT_DATE'] = (1970, 1, 1)
- settings['READERS'] = {'asc': None}
+ settings["CONTENT_CACHING_LAYER"] = "reader"
+ settings["PAGE_PATHS"] = ["TestPages"]
+ settings["DEFAULT_DATE"] = (1970, 1, 1)
+ settings["READERS"] = {"asc": None}
context = get_context(settings)
def sorted_titles(items):
@@ -91,15 +106,23 @@ class TestCache(unittest.TestCase):
# Articles
generator = ArticlesGenerator(
- context=context.copy(), settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
uncached_articles = sorted_titles(generator.articles)
uncached_drafts = sorted_titles(generator.drafts)
generator = ArticlesGenerator(
- context=context.copy(), settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
cached_articles = sorted_titles(generator.articles)
cached_drafts = sorted_titles(generator.drafts)
@@ -109,15 +132,23 @@ class TestCache(unittest.TestCase):
# Pages
generator = PagesGenerator(
- context=context.copy(), settings=settings,
- path=CUR_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CUR_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
uncached_pages = sorted_titles(generator.pages)
uncached_hidden_pages = sorted_titles(generator.hidden_pages)
generator = PagesGenerator(
- context=context.copy(), settings=settings,
- path=CUR_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CUR_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
cached_pages = sorted_titles(generator.pages)
cached_hidden_pages = sorted_titles(generator.hidden_pages)
@@ -128,20 +159,28 @@ class TestCache(unittest.TestCase):
def test_article_object_caching(self):
"""Test Article objects caching at the generator level"""
settings = self._get_cache_enabled_settings()
- settings['CONTENT_CACHING_LAYER'] = 'generator'
- settings['DEFAULT_DATE'] = (1970, 1, 1)
- settings['READERS'] = {'asc': None}
+ settings["CONTENT_CACHING_LAYER"] = "generator"
+ settings["DEFAULT_DATE"] = (1970, 1, 1)
+ settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = ArticlesGenerator(
- context=context.copy(), settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
- self.assertTrue(hasattr(generator, '_cache'))
+ self.assertTrue(hasattr(generator, "_cache"))
generator = ArticlesGenerator(
- context=context.copy(), settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.readers.read_file = MagicMock()
generator.generate_context()
"""
@@ -158,18 +197,26 @@ class TestCache(unittest.TestCase):
def test_article_reader_content_caching(self):
"""Test raw article content caching at the reader level"""
settings = self._get_cache_enabled_settings()
- settings['READERS'] = {'asc': None}
+ settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = ArticlesGenerator(
- context=context.copy(), settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
- self.assertTrue(hasattr(generator.readers, '_cache'))
+ self.assertTrue(hasattr(generator.readers, "_cache"))
generator = ArticlesGenerator(
- context=context.copy(), settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
readers = generator.readers.readers
for reader in readers.values():
reader.read = MagicMock()
@@ -182,44 +229,58 @@ class TestCache(unittest.TestCase):
used in --ignore-cache or autoreload mode"""
settings = self._get_cache_enabled_settings()
- settings['READERS'] = {'asc': None}
+ settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = ArticlesGenerator(
- context=context.copy(), settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.readers.read_file = MagicMock()
generator.generate_context()
- self.assertTrue(hasattr(generator, '_cache_open'))
+ self.assertTrue(hasattr(generator, "_cache_open"))
orig_call_count = generator.readers.read_file.call_count
- settings['LOAD_CONTENT_CACHE'] = False
+ settings["LOAD_CONTENT_CACHE"] = False
generator = ArticlesGenerator(
- context=context.copy(), settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.readers.read_file = MagicMock()
generator.generate_context()
- self.assertEqual(
- generator.readers.read_file.call_count,
- orig_call_count)
+ self.assertEqual(generator.readers.read_file.call_count, orig_call_count)
def test_page_object_caching(self):
"""Test Page objects caching at the generator level"""
settings = self._get_cache_enabled_settings()
- settings['CONTENT_CACHING_LAYER'] = 'generator'
- settings['PAGE_PATHS'] = ['TestPages']
- settings['READERS'] = {'asc': None}
+ settings["CONTENT_CACHING_LAYER"] = "generator"
+ settings["PAGE_PATHS"] = ["TestPages"]
+ settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = PagesGenerator(
- context=context.copy(), settings=settings,
- path=CUR_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CUR_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
- self.assertTrue(hasattr(generator, '_cache'))
+ self.assertTrue(hasattr(generator, "_cache"))
generator = PagesGenerator(
- context=context.copy(), settings=settings,
- path=CUR_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CUR_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.readers.read_file = MagicMock()
generator.generate_context()
"""
@@ -231,19 +292,27 @@ class TestCache(unittest.TestCase):
def test_page_reader_content_caching(self):
"""Test raw page content caching at the reader level"""
settings = self._get_cache_enabled_settings()
- settings['PAGE_PATHS'] = ['TestPages']
- settings['READERS'] = {'asc': None}
+ settings["PAGE_PATHS"] = ["TestPages"]
+ settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = PagesGenerator(
- context=context.copy(), settings=settings,
- path=CUR_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CUR_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
- self.assertTrue(hasattr(generator.readers, '_cache'))
+ self.assertTrue(hasattr(generator.readers, "_cache"))
generator = PagesGenerator(
- context=context.copy(), settings=settings,
- path=CUR_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CUR_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
readers = generator.readers.readers
for reader in readers.values():
reader.read = MagicMock()
@@ -256,24 +325,30 @@ class TestCache(unittest.TestCase):
used in --ignore_cache or autoreload mode"""
settings = self._get_cache_enabled_settings()
- settings['PAGE_PATHS'] = ['TestPages']
- settings['READERS'] = {'asc': None}
+ settings["PAGE_PATHS"] = ["TestPages"]
+ settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = PagesGenerator(
- context=context.copy(), settings=settings,
- path=CUR_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CUR_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.readers.read_file = MagicMock()
generator.generate_context()
- self.assertTrue(hasattr(generator, '_cache_open'))
+ self.assertTrue(hasattr(generator, "_cache_open"))
orig_call_count = generator.readers.read_file.call_count
- settings['LOAD_CONTENT_CACHE'] = False
+ settings["LOAD_CONTENT_CACHE"] = False
generator = PagesGenerator(
- context=context.copy(), settings=settings,
- path=CUR_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CUR_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.readers.read_file = MagicMock()
generator.generate_context()
- self.assertEqual(
- generator.readers.read_file.call_count,
- orig_call_count)
+ self.assertEqual(generator.readers.read_file.call_count, orig_call_count)
diff --git a/pelican/tests/test_cli.py b/pelican/tests/test_cli.py
index 13b307e7..0b9656be 100644
--- a/pelican/tests/test_cli.py
+++ b/pelican/tests/test_cli.py
@@ -5,68 +5,77 @@ from pelican import get_config, parse_arguments
class TestParseOverrides(unittest.TestCase):
def test_flags(self):
- for flag in ['-e', '--extra-settings']:
- args = parse_arguments([flag, 'k=1'])
- self.assertDictEqual(args.overrides, {'k': 1})
+ for flag in ["-e", "--extra-settings"]:
+ args = parse_arguments([flag, "k=1"])
+ self.assertDictEqual(args.overrides, {"k": 1})
def test_parse_multiple_items(self):
- args = parse_arguments('-e k1=1 k2=2'.split())
- self.assertDictEqual(args.overrides, {'k1': 1, 'k2': 2})
+ args = parse_arguments("-e k1=1 k2=2".split())
+ self.assertDictEqual(args.overrides, {"k1": 1, "k2": 2})
def test_parse_valid_json(self):
json_values_python_values_map = {
- '""': '',
- 'null': None,
- '"string"': 'string',
- '["foo", 12, "4", {}]': ['foo', 12, '4', {}]
+ '""': "",
+ "null": None,
+ '"string"': "string",
+ '["foo", 12, "4", {}]': ["foo", 12, "4", {}],
}
for k, v in json_values_python_values_map.items():
- args = parse_arguments(['-e', 'k=' + k])
- self.assertDictEqual(args.overrides, {'k': v})
+ args = parse_arguments(["-e", "k=" + k])
+ self.assertDictEqual(args.overrides, {"k": v})
def test_parse_invalid_syntax(self):
- invalid_items = ['k= 1', 'k =1', 'k', 'k v']
+ invalid_items = ["k= 1", "k =1", "k", "k v"]
for item in invalid_items:
with self.assertRaises(ValueError):
- parse_arguments(f'-e {item}'.split())
+ parse_arguments(f"-e {item}".split())
def test_parse_invalid_json(self):
invalid_json = {
- '', 'False', 'True', 'None', 'some other string',
- '{"foo": bar}', '[foo]'
+ "",
+ "False",
+ "True",
+ "None",
+ "some other string",
+ '{"foo": bar}',
+ "[foo]",
}
for v in invalid_json:
with self.assertRaises(ValueError):
- parse_arguments(['-e ', 'k=' + v])
+ parse_arguments(["-e ", "k=" + v])
class TestGetConfigFromArgs(unittest.TestCase):
def test_overrides_known_keys(self):
- args = parse_arguments([
- '-e',
- 'DELETE_OUTPUT_DIRECTORY=false',
- 'OUTPUT_RETENTION=["1.txt"]',
- 'SITENAME="Title"'
- ])
+ args = parse_arguments(
+ [
+ "-e",
+ "DELETE_OUTPUT_DIRECTORY=false",
+ 'OUTPUT_RETENTION=["1.txt"]',
+ 'SITENAME="Title"',
+ ]
+ )
config = get_config(args)
config_must_contain = {
- 'DELETE_OUTPUT_DIRECTORY': False,
- 'OUTPUT_RETENTION': ['1.txt'],
- 'SITENAME': 'Title'
+ "DELETE_OUTPUT_DIRECTORY": False,
+ "OUTPUT_RETENTION": ["1.txt"],
+ "SITENAME": "Title",
}
self.assertDictEqual(config, {**config, **config_must_contain})
def test_overrides_non_default_type(self):
- args = parse_arguments([
- '-e',
- 'DISPLAY_PAGES_ON_MENU=123',
- 'PAGE_TRANSLATION_ID=null',
- 'TRANSLATION_FEED_RSS_URL="someurl"'
- ])
+ args = parse_arguments(
+ [
+ "-e",
+ "DISPLAY_PAGES_ON_MENU=123",
+ "PAGE_TRANSLATION_ID=null",
+ 'TRANSLATION_FEED_RSS_URL="someurl"',
+ ]
+ )
config = get_config(args)
config_must_contain = {
- 'DISPLAY_PAGES_ON_MENU': 123,
- 'PAGE_TRANSLATION_ID': None,
- 'TRANSLATION_FEED_RSS_URL': 'someurl'
+ "DISPLAY_PAGES_ON_MENU": 123,
+ "PAGE_TRANSLATION_ID": None,
+ "TRANSLATION_FEED_RSS_URL": "someurl",
}
self.assertDictEqual(config, {**config, **config_must_contain})
diff --git a/pelican/tests/test_contents.py b/pelican/tests/test_contents.py
index 3a223b5a..9dc7b70d 100644
--- a/pelican/tests/test_contents.py
+++ b/pelican/tests/test_contents.py
@@ -10,9 +10,8 @@ from jinja2.utils import generate_lorem_ipsum
from pelican.contents import Article, Author, Category, Page, Static
from pelican.plugins.signals import content_object_init
from pelican.settings import DEFAULT_CONFIG
-from pelican.tests.support import (LoggedTestCase, get_context, get_settings,
- unittest)
-from pelican.utils import (path_to_url, posixize_path, truncate_html_words)
+from pelican.tests.support import LoggedTestCase, get_context, get_settings, unittest
+from pelican.utils import path_to_url, posixize_path, truncate_html_words
# generate one paragraph, enclosed with <p>
@@ -21,25 +20,24 @@ TEST_SUMMARY = generate_lorem_ipsum(n=1, html=False)
class TestBase(LoggedTestCase):
-
def setUp(self):
super().setUp()
self.old_locale = locale.setlocale(locale.LC_ALL)
- locale.setlocale(locale.LC_ALL, 'C')
+ locale.setlocale(locale.LC_ALL, "C")
self.page_kwargs = {
- 'content': TEST_CONTENT,
- 'context': {
- 'localsiteurl': '',
- 'generated_content': {},
- 'static_content': {},
- 'static_links': set()
+ "content": TEST_CONTENT,
+ "context": {
+ "localsiteurl": "",
+ "generated_content": {},
+ "static_content": {},
+ "static_links": set(),
},
- 'metadata': {
- 'summary': TEST_SUMMARY,
- 'title': 'foo bar',
- 'author': Author('Blogger', DEFAULT_CONFIG),
+ "metadata": {
+ "summary": TEST_SUMMARY,
+ "title": "foo bar",
+ "author": Author("Blogger", DEFAULT_CONFIG),
},
- 'source_path': '/path/to/file/foo.ext'
+ "source_path": "/path/to/file/foo.ext",
}
self._disable_limit_filter()
@@ -49,10 +47,12 @@ class TestBase(LoggedTestCase):
def _disable_limit_filter(self):
from pelican.contents import logger
+
logger.disable_filter()
def _enable_limit_filter(self):
from pelican.contents import logger
+
logger.enable_filter()
def _copy_page_kwargs(self):
@@ -72,9 +72,12 @@ class TestPage(TestBase):
def test_use_args(self):
# Creating a page with arguments passed to the constructor should use
# them to initialise object's attributes.
- metadata = {'foo': 'bar', 'foobar': 'baz', 'title': 'foobar', }
- page = Page(TEST_CONTENT, metadata=metadata,
- context={'localsiteurl': ''})
+ metadata = {
+ "foo": "bar",
+ "foobar": "baz",
+ "title": "foobar",
+ }
+ page = Page(TEST_CONTENT, metadata=metadata, context={"localsiteurl": ""})
for key, value in metadata.items():
self.assertTrue(hasattr(page, key))
self.assertEqual(value, getattr(page, key))
@@ -82,13 +85,14 @@ class TestPage(TestBase):
def test_mandatory_properties(self):
# If the title is not set, must throw an exception.
- page = Page('content')
+ page = Page("content")
self.assertFalse(page._has_valid_mandatory_properties())
self.assertLogCountEqual(
- count=1,
- msg="Skipping .*: could not find information about 'title'",
- level=logging.ERROR)
- page = Page('content', metadata={'title': 'foobar'})
+ count=1,
+ msg="Skipping .*: could not find information about 'title'",
+ level=logging.ERROR,
+ )
+ page = Page("content", metadata={"title": "foobar"})
self.assertTrue(page._has_valid_mandatory_properties())
def test_summary_from_metadata(self):
@@ -101,31 +105,32 @@ class TestPage(TestBase):
# generated summary should not exceed the given length.
page_kwargs = self._copy_page_kwargs()
settings = get_settings()
- page_kwargs['settings'] = settings
- del page_kwargs['metadata']['summary']
- settings['SUMMARY_MAX_LENGTH'] = None
+ page_kwargs["settings"] = settings
+ del page_kwargs["metadata"]["summary"]
+ settings["SUMMARY_MAX_LENGTH"] = None
page = Page(**page_kwargs)
self.assertEqual(page.summary, TEST_CONTENT)
- settings['SUMMARY_MAX_LENGTH'] = 10
+ settings["SUMMARY_MAX_LENGTH"] = 10
page = Page(**page_kwargs)
self.assertEqual(page.summary, truncate_html_words(TEST_CONTENT, 10))
- settings['SUMMARY_MAX_LENGTH'] = 0
+ settings["SUMMARY_MAX_LENGTH"] = 0
page = Page(**page_kwargs)
- self.assertEqual(page.summary, '')
+ self.assertEqual(page.summary, "")
def test_summary_end_suffix(self):
# If a :SUMMARY_END_SUFFIX: is set, and there is no other summary,
# generated summary should contain the specified marker at the end.
page_kwargs = self._copy_page_kwargs()
settings = get_settings()
- page_kwargs['settings'] = settings
- del page_kwargs['metadata']['summary']
- settings['SUMMARY_END_SUFFIX'] = 'test_marker'
- settings['SUMMARY_MAX_LENGTH'] = 10
+ page_kwargs["settings"] = settings
+ del page_kwargs["metadata"]["summary"]
+ settings["SUMMARY_END_SUFFIX"] = "test_marker"
+ settings["SUMMARY_MAX_LENGTH"] = 10
page = Page(**page_kwargs)
- self.assertEqual(page.summary, truncate_html_words(TEST_CONTENT, 10,
- 'test_marker'))
- self.assertIn('test_marker', page.summary)
+ self.assertEqual(
+ page.summary, truncate_html_words(TEST_CONTENT, 10, "test_marker")
+ )
+ self.assertIn("test_marker", page.summary)
def test_summary_get_summary_warning(self):
"""calling ._get_summary() should issue a warning"""
@@ -134,57 +139,61 @@ class TestPage(TestBase):
self.assertEqual(page.summary, TEST_SUMMARY)
self.assertEqual(page._get_summary(), TEST_SUMMARY)
self.assertLogCountEqual(
- count=1,
- msg=r"_get_summary\(\) has been deprecated since 3\.6\.4\. "
- "Use the summary decorator instead",
- level=logging.WARNING)
+ count=1,
+ msg=r"_get_summary\(\) has been deprecated since 3\.6\.4\. "
+ "Use the summary decorator instead",
+ level=logging.WARNING,
+ )
def test_slug(self):
page_kwargs = self._copy_page_kwargs()
settings = get_settings()
- page_kwargs['settings'] = settings
- settings['SLUGIFY_SOURCE'] = "title"
+ page_kwargs["settings"] = settings
+ settings["SLUGIFY_SOURCE"] = "title"
page = Page(**page_kwargs)
- self.assertEqual(page.slug, 'foo-bar')
- settings['SLUGIFY_SOURCE'] = "basename"
+ self.assertEqual(page.slug, "foo-bar")
+ settings["SLUGIFY_SOURCE"] = "basename"
page = Page(**page_kwargs)
- self.assertEqual(page.slug, 'foo')
+ self.assertEqual(page.slug, "foo")
# test slug from title with unicode and case
inputs = (
# (title, expected, preserve_case, use_unicode)
- ('指導書', 'zhi-dao-shu', False, False),
- ('指導書', 'Zhi-Dao-Shu', True, False),
- ('指導書', '指導書', False, True),
- ('指導書', '指導書', True, True),
- ('Çığ', 'cig', False, False),
- ('Çığ', 'Cig', True, False),
- ('Çığ', 'çığ', False, True),
- ('Çığ', 'Çığ', True, True),
+ ("指導書", "zhi-dao-shu", False, False),
+ ("指導書", "Zhi-Dao-Shu", True, False),
+ ("指導書", "指導書", False, True),
+ ("指導書", "指導書", True, True),
+ ("Çığ", "cig", False, False),
+ ("Çığ", "Cig", True, False),
+ ("Çığ", "çığ", False, True),
+ ("Çığ", "Çığ", True, True),
)
settings = get_settings()
page_kwargs = self._copy_page_kwargs()
- page_kwargs['settings'] = settings
+ page_kwargs["settings"] = settings
for title, expected, preserve_case, use_unicode in inputs:
- settings['SLUGIFY_PRESERVE_CASE'] = preserve_case
- settings['SLUGIFY_USE_UNICODE'] = use_unicode
- page_kwargs['metadata']['title'] = title
+ settings["SLUGIFY_PRESERVE_CASE"] = preserve_case
+ settings["SLUGIFY_USE_UNICODE"] = use_unicode
+ page_kwargs["metadata"]["title"] = title
page = Page(**page_kwargs)
- self.assertEqual(page.slug, expected,
- (title, preserve_case, use_unicode))
+ self.assertEqual(page.slug, expected, (title, preserve_case, use_unicode))
def test_defaultlang(self):
# If no lang is given, default to the default one.
page = Page(**self.page_kwargs)
- self.assertEqual(page.lang, DEFAULT_CONFIG['DEFAULT_LANG'])
+ self.assertEqual(page.lang, DEFAULT_CONFIG["DEFAULT_LANG"])
# it is possible to specify the lang in the metadata infos
- self.page_kwargs['metadata'].update({'lang': 'fr', })
+ self.page_kwargs["metadata"].update(
+ {
+ "lang": "fr",
+ }
+ )
page = Page(**self.page_kwargs)
- self.assertEqual(page.lang, 'fr')
+ self.assertEqual(page.lang, "fr")
def test_save_as(self):
# If a lang is not the default lang, save_as should be set
@@ -195,7 +204,11 @@ class TestPage(TestBase):
self.assertEqual(page.save_as, "pages/foo-bar.html")
# if a language is defined, save_as should include it accordingly
- self.page_kwargs['metadata'].update({'lang': 'fr', })
+ self.page_kwargs["metadata"].update(
+ {
+ "lang": "fr",
+ }
+ )
page = Page(**self.page_kwargs)
self.assertEqual(page.save_as, "pages/foo-bar-fr.html")
@@ -206,34 +219,32 @@ class TestPage(TestBase):
# If 'source_path' is None, 'relative_source_path' should
# also return None
- page_kwargs['source_path'] = None
+ page_kwargs["source_path"] = None
page = Page(**page_kwargs)
self.assertIsNone(page.relative_source_path)
page_kwargs = self._copy_page_kwargs()
settings = get_settings()
- full_path = page_kwargs['source_path']
+ full_path = page_kwargs["source_path"]
- settings['PATH'] = os.path.dirname(full_path)
- page_kwargs['settings'] = settings
+ settings["PATH"] = os.path.dirname(full_path)
+ page_kwargs["settings"] = settings
page = Page(**page_kwargs)
# if 'source_path' is set, 'relative_source_path' should
# return the relative path from 'PATH' to 'source_path'
self.assertEqual(
page.relative_source_path,
- os.path.relpath(
- full_path,
- os.path.dirname(full_path)
- ))
+ os.path.relpath(full_path, os.path.dirname(full_path)),
+ )
def test_metadata_url_format(self):
# Arbitrary metadata should be passed through url_format()
page = Page(**self.page_kwargs)
- self.assertIn('summary', page.url_format.keys())
- page.metadata['directory'] = 'test-dir'
- page.settings = get_settings(PAGE_SAVE_AS='{directory}/{slug}')
- self.assertEqual(page.save_as, 'test-dir/foo-bar')
+ self.assertIn("summary", page.url_format.keys())
+ page.metadata["directory"] = "test-dir"
+ page.settings = get_settings(PAGE_SAVE_AS="{directory}/{slug}")
+ self.assertEqual(page.save_as, "test-dir/foo-bar")
def test_datetime(self):
# If DATETIME is set to a tuple, it should be used to override LOCALE
@@ -242,28 +253,28 @@ class TestPage(TestBase):
page_kwargs = self._copy_page_kwargs()
# set its date to dt
- page_kwargs['metadata']['date'] = dt
+ page_kwargs["metadata"]["date"] = dt
page = Page(**page_kwargs)
# page.locale_date is a unicode string in both python2 and python3
- dt_date = dt.strftime(DEFAULT_CONFIG['DEFAULT_DATE_FORMAT'])
+ dt_date = dt.strftime(DEFAULT_CONFIG["DEFAULT_DATE_FORMAT"])
self.assertEqual(page.locale_date, dt_date)
- page_kwargs['settings'] = get_settings()
+ page_kwargs["settings"] = get_settings()
# I doubt this can work on all platforms ...
if platform == "win32":
- locale = 'jpn'
+ locale = "jpn"
else:
- locale = 'ja_JP.utf8'
- page_kwargs['settings']['DATE_FORMATS'] = {'jp': (locale,
- '%Y-%m-%d(%a)')}
- page_kwargs['metadata']['lang'] = 'jp'
+ locale = "ja_JP.utf8"
+ page_kwargs["settings"]["DATE_FORMATS"] = {"jp": (locale, "%Y-%m-%d(%a)")}
+ page_kwargs["metadata"]["lang"] = "jp"
import locale as locale_module
+
try:
page = Page(**page_kwargs)
- self.assertEqual(page.locale_date, '2015-09-13(\u65e5)')
+ self.assertEqual(page.locale_date, "2015-09-13(\u65e5)")
except locale_module.Error:
# The constructor of ``Page`` will try to set the locale to
# ``ja_JP.utf8``. But this attempt will failed when there is no
@@ -277,22 +288,21 @@ class TestPage(TestBase):
def test_template(self):
# Pages default to page, metadata overwrites
default_page = Page(**self.page_kwargs)
- self.assertEqual('page', default_page.template)
+ self.assertEqual("page", default_page.template)
page_kwargs = self._copy_page_kwargs()
- page_kwargs['metadata']['template'] = 'custom'
+ page_kwargs["metadata"]["template"] = "custom"
custom_page = Page(**page_kwargs)
- self.assertEqual('custom', custom_page.template)
+ self.assertEqual("custom", custom_page.template)
def test_signal(self):
def receiver_test_function(sender):
receiver_test_function.has_been_called = True
pass
+
receiver_test_function.has_been_called = False
content_object_init.connect(receiver_test_function)
- self.assertIn(
- receiver_test_function,
- content_object_init.receivers_for(Page))
+ self.assertIn(receiver_test_function, content_object_init.receivers_for(Page))
self.assertFalse(receiver_test_function.has_been_called)
Page(**self.page_kwargs)
@@ -303,102 +313,106 @@ class TestPage(TestBase):
# filenames, tags and categories.
settings = get_settings()
args = self.page_kwargs.copy()
- args['settings'] = settings
+ args["settings"] = settings
# Tag
- args['content'] = ('A simple test, with a '
- '<a href="|tag|tagname">link</a>')
+ args["content"] = "A simple test, with a " '<a href="|tag|tagname">link</a>'
page = Page(**args)
- content = page.get_content('http://notmyidea.org')
+ content = page.get_content("http://notmyidea.org")
self.assertEqual(
content,
- ('A simple test, with a '
- '<a href="http://notmyidea.org/tag/tagname.html">link</a>'))
+ (
+ "A simple test, with a "
+ '<a href="http://notmyidea.org/tag/tagname.html">link</a>'
+ ),
+ )
# Category
- args['content'] = ('A simple test, with a '
- '<a href="|category|category">link</a>')
+ args["content"] = (
+ "A simple test, with a " '<a href="|category|category">link</a>'
+ )
page = Page(**args)
- content = page.get_content('http://notmyidea.org')
+ content = page.get_content("http://notmyidea.org")
self.assertEqual(
content,
- ('A simple test, with a '
- '<a href="http://notmyidea.org/category/category.html">link</a>'))
+ (
+ "A simple test, with a "
+ '<a href="http://notmyidea.org/category/category.html">link</a>'
+ ),
+ )
def test_intrasite_link(self):
- cls_name = '_DummyArticle'
- article = type(cls_name, (object,), {'url': 'article.html'})
+ cls_name = "_DummyArticle"
+ article = type(cls_name, (object,), {"url": "article.html"})
args = self.page_kwargs.copy()
- args['settings'] = get_settings()
- args['source_path'] = 'content'
- args['context']['generated_content'] = {'article.rst': article}
+ args["settings"] = get_settings()
+ args["source_path"] = "content"
+ args["context"]["generated_content"] = {"article.rst": article}
# Classic intrasite link via filename
- args['content'] = (
- 'A simple test, with a '
- '<a href="|filename|article.rst">link</a>'
+ args["content"] = (
+ "A simple test, with a " '<a href="|filename|article.rst">link</a>'
)
- content = Page(**args).get_content('http://notmyidea.org')
+ content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
- 'A simple test, with a '
- '<a href="http://notmyidea.org/article.html">link</a>'
+ "A simple test, with a "
+ '<a href="http://notmyidea.org/article.html">link</a>',
)
# fragment
- args['content'] = (
- 'A simple test, with a '
+ args["content"] = (
+ "A simple test, with a "
'<a href="|filename|article.rst#section-2">link</a>'
)
- content = Page(**args).get_content('http://notmyidea.org')
+ content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
- 'A simple test, with a '
- '<a href="http://notmyidea.org/article.html#section-2">link</a>'
+ "A simple test, with a "
+ '<a href="http://notmyidea.org/article.html#section-2">link</a>',
)
# query
- args['content'] = (
- 'A simple test, with a '
+ args["content"] = (
+ "A simple test, with a "
'<a href="|filename|article.rst'
'?utm_whatever=234&highlight=word">link</a>'
)
- content = Page(**args).get_content('http://notmyidea.org')
+ content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
- 'A simple test, with a '
+ "A simple test, with a "
'<a href="http://notmyidea.org/article.html'
- '?utm_whatever=234&highlight=word">link</a>'
+ '?utm_whatever=234&highlight=word">link</a>',
)
# combination
- args['content'] = (
- 'A simple test, with a '
+ args["content"] = (
+ "A simple test, with a "
'<a href="|filename|article.rst'
'?utm_whatever=234&highlight=word#section-2">link</a>'
)
- content = Page(**args).get_content('http://notmyidea.org')
+ content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
- 'A simple test, with a '
+ "A simple test, with a "
'<a href="http://notmyidea.org/article.html'
- '?utm_whatever=234&highlight=word#section-2">link</a>'
+ '?utm_whatever=234&highlight=word#section-2">link</a>',
)
# also test for summary in metadata
parsed = (
- 'A simple summary test, with a '
- '<a href="|filename|article.rst">link</a>'
+ "A simple summary test, with a " '<a href="|filename|article.rst">link</a>'
)
linked = (
- 'A simple summary test, with a '
+ "A simple summary test, with a "
'<a href="http://notmyidea.org/article.html">link</a>'
)
- args['settings']['FORMATTED_FIELDS'] = ['summary', 'custom']
- args['metadata']['summary'] = parsed
- args['metadata']['custom'] = parsed
- args['context']['localsiteurl'] = 'http://notmyidea.org'
+ args["settings"]["FORMATTED_FIELDS"] = ["summary", "custom"]
+ args["metadata"]["summary"] = parsed
+ args["metadata"]["custom"] = parsed
+ args["context"]["localsiteurl"] = "http://notmyidea.org"
p = Page(**args)
# This is called implicitly from all generators and Pelican.run() once
# all files are processed. Here we process just one page so it needs
@@ -408,252 +422,236 @@ class TestPage(TestBase):
self.assertEqual(p.custom, linked)
def test_intrasite_link_more(self):
- cls_name = '_DummyAsset'
+ cls_name = "_DummyAsset"
args = self.page_kwargs.copy()
- args['settings'] = get_settings()
- args['source_path'] = 'content'
- args['context']['static_content'] = {
- 'images/poster.jpg':
- type(cls_name, (object,), {'url': 'images/poster.jpg'}),
- 'assets/video.mp4':
- type(cls_name, (object,), {'url': 'assets/video.mp4'}),
- 'images/graph.svg':
- type(cls_name, (object,), {'url': 'images/graph.svg'}),
+ args["settings"] = get_settings()
+ args["source_path"] = "content"
+ args["context"]["static_content"] = {
+ "images/poster.jpg": type(
+ cls_name, (object,), {"url": "images/poster.jpg"}
+ ),
+ "assets/video.mp4": type(cls_name, (object,), {"url": "assets/video.mp4"}),
+ "images/graph.svg": type(cls_name, (object,), {"url": "images/graph.svg"}),
}
- args['context']['generated_content'] = {
- 'reference.rst':
- type(cls_name, (object,), {'url': 'reference.html'}),
+ args["context"]["generated_content"] = {
+ "reference.rst": type(cls_name, (object,), {"url": "reference.html"}),
}
# video.poster
- args['content'] = (
- 'There is a video with poster '
+ args["content"] = (
+ "There is a video with poster "
'<video controls poster="{static}/images/poster.jpg">'
'<source src="|static|/assets/video.mp4" type="video/mp4">'
- '</video>'
+ "</video>"
)
- content = Page(**args).get_content('http://notmyidea.org')
+ content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
- 'There is a video with poster '
+ "There is a video with poster "
'<video controls poster="http://notmyidea.org/images/poster.jpg">'
'<source src="http://notmyidea.org/assets/video.mp4" type="video/mp4">'
- '</video>'
+ "</video>",
)
# object.data
- args['content'] = (
- 'There is a svg object '
+ args["content"] = (
+ "There is a svg object "
'<object data="{static}/images/graph.svg" type="image/svg+xml">'
- '</object>'
+ "</object>"
)
- content = Page(**args).get_content('http://notmyidea.org')
+ content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
- 'There is a svg object '
+ "There is a svg object "
'<object data="http://notmyidea.org/images/graph.svg" type="image/svg+xml">'
- '</object>'
+ "</object>",
)
# blockquote.cite
- args['content'] = (
- 'There is a blockquote with cite attribute '
+ args["content"] = (
+ "There is a blockquote with cite attribute "
'<blockquote cite="{filename}reference.rst">blah blah</blockquote>'
blah blah '
)
- content = Page(**args).get_content('http://notmyidea.org')
+ content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
- 'There is a blockquote with cite attribute '
+ "There is a blockquote with cite attribute "
''
- 'blah blah'
- ' '
+ "blah blah"
+ "",
)
def test_intrasite_link_absolute(self):
"""Test that absolute URLs are merged properly."""
args = self.page_kwargs.copy()
- args['settings'] = get_settings(
- STATIC_URL='http://static.cool.site/{path}',
- ARTICLE_URL='http://blog.cool.site/{slug}.html')
- args['source_path'] = 'content'
- args['context']['static_content'] = {
- 'images/poster.jpg':
- Static('', settings=args['settings'],
- source_path='images/poster.jpg'),
+ args["settings"] = get_settings(
+ STATIC_URL="http://static.cool.site/{path}",
+ ARTICLE_URL="http://blog.cool.site/{slug}.html",
+ )
+ args["source_path"] = "content"
+ args["context"]["static_content"] = {
+ "images/poster.jpg": Static(
+ "", settings=args["settings"], source_path="images/poster.jpg"
+ ),
}
- args['context']['generated_content'] = {
- 'article.rst':
- Article('', settings=args['settings'], metadata={
- 'slug': 'article', 'title': 'Article'})
+ args["context"]["generated_content"] = {
+ "article.rst": Article(
+ "",
+ settings=args["settings"],
+ metadata={"slug": "article", "title": "Article"},
+ )
}
# Article link will go to blog
- args['content'] = (
- '<a href="{filename}article.rst">Article</a>'
- )
- content = Page(**args).get_content('http://cool.site')
+ args["content"] = '<a href="{filename}article.rst">Article</a>'
+ content = Page(**args).get_content("http://cool.site")
self.assertEqual(
- content,
- '<a href="http://blog.cool.site/article.html">Article</a>'
+ content, '<a href="http://blog.cool.site/article.html">Article</a>'
)
# Page link will go to the main site
- args['content'] = (
- '<a href="{index}">Index</a>'
- )
- content = Page(**args).get_content('http://cool.site')
+ args["content"] = '<a href="{index}">Index</a>'
+ content = Page(**args).get_content("http://cool.site")
+ self.assertEqual(content, '<a href="http://cool.site/index.html">Index</a>')
+
+ # Image link will go to static
+ args["content"] = '<img src="{static}/images/poster.jpg"/>'
+ content = Page(**args).get_content("http://cool.site")
self.assertEqual(
- content,
- '<a href="http://cool.site/index.html">Index</a>'
+ content, '<img src="http://static.cool.site/images/poster.jpg"/>'
)
# Image link will go to static
- args['content'] = (
- '<img src="{static}/images/poster.jpg"/>'
- )
- content = Page(**args).get_content('http://cool.site')
+ args["content"] = '<img src="{attach}images/poster.jpg"/>'
+ content = Page(**args).get_content("http://cool.site")
self.assertEqual(
- content,
- '<img src="http://static.cool.site/images/poster.jpg"/>'
- )
-
- # Image link will go to static
- args['content'] = (
- '<img src="{attach}images/poster.jpg"/>'
- )
- content = Page(**args).get_content('http://cool.site')
- self.assertEqual(
- content,
- '<img src="http://static.cool.site/images/poster.jpg"/>'
+ content, '<img src="http://static.cool.site/images/poster.jpg"/>'
)
def test_intrasite_link_escape(self):
- article = type(
- '_DummyArticle', (object,), {'url': 'article-spaces.html'})
- asset = type(
- '_DummyAsset', (object,), {'url': 'name@example.com'})
+ article = type("_DummyArticle", (object,), {"url": "article-spaces.html"})
+ asset = type("_DummyAsset", (object,), {"url": "name@example.com"})
args = self.page_kwargs.copy()
- args['settings'] = get_settings()
- args['source_path'] = 'content'
- args['context']['generated_content'] = {'article spaces.rst': article}
- args['context']['static_content'] = {'name@example.com': asset}
+ args["settings"] = get_settings()
+ args["source_path"] = "content"
+ args["context"]["generated_content"] = {"article spaces.rst": article}
+ args["context"]["static_content"] = {"name@example.com": asset}
expected_output = (
- 'A simple test with a '
+ "A simple test with a "
'<a href="http://notmyidea.org/article-spaces.html">link</a> '
'<a href="http://notmyidea.org/name@example.com">file</a>'
)
# not escaped
- args['content'] = (
- 'A simple test with a '
+ args["content"] = (
+ "A simple test with a "
'<a href="{filename}article spaces.rst">link</a> '
'<a href="{static}name@example.com">file</a>'
)
- content = Page(**args).get_content('http://notmyidea.org')
+ content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(content, expected_output)
# html escaped
- args['content'] = (
- 'A simple test with a '
+ args["content"] = (
+ "A simple test with a "
'<a href="{filename}article&#32;spaces.rst">link</a> '
'<a href="{static}name&#64;example.com">file</a>'
)
- content = Page(**args).get_content('http://notmyidea.org')
+ content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(content, expected_output)
# url escaped
- args['content'] = (
- 'A simple test with a '
+ args["content"] = (
+ "A simple test with a "
'<a href="{filename}article%20spaces.rst">link</a> '
'<a href="{static}name%40example.com">file</a>'
)
- content = Page(**args).get_content('http://notmyidea.org')
+ content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(content, expected_output)
# html and url escaped
- args['content'] = (
- 'A simple test with a '
+ args["content"] = (
+ "A simple test with a "
'<a href="{filename}article&#37;20spaces.rst">link</a> '
'<a href="{static}name&#37;40example.com">file</a>'
)
- content = Page(**args).get_content('http://notmyidea.org')
+ content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(content, expected_output)
def test_intrasite_link_markdown_spaces(self):
- cls_name = '_DummyArticle'
- article = type(cls_name, (object,), {'url': 'article-spaces.html'})
+ cls_name = "_DummyArticle"
+ article = type(cls_name, (object,), {"url": "article-spaces.html"})
args = self.page_kwargs.copy()
- args['settings'] = get_settings()
- args['source_path'] = 'content'
- args['context']['generated_content'] = {'article spaces.rst': article}
+ args["settings"] = get_settings()
+ args["source_path"] = "content"
+ args["context"]["generated_content"] = {"article spaces.rst": article}
# An intrasite link via filename with %20 as a space
- args['content'] = (
- 'A simple test, with a '
- '<a href="|filename|article%20spaces.rst">link</a>'
+ args["content"] = (
+ "A simple test, with a " '<a href="|filename|article%20spaces.rst">link</a>'
)
- content = Page(**args).get_content('http://notmyidea.org')
+ content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
- 'A simple test, with a '
- '<a href="http://notmyidea.org/article-spaces.html">link</a>'
+ "A simple test, with a "
+ '<a href="http://notmyidea.org/article-spaces.html">link</a>',
)
def test_intrasite_link_source_and_generated(self):
- """Test linking both to the source and the generated article
- """
- cls_name = '_DummyAsset'
+ """Test linking both to the source and the generated article"""
+ cls_name = "_DummyAsset"
args = self.page_kwargs.copy()
- args['settings'] = get_settings()
- args['source_path'] = 'content'
- args['context']['generated_content'] = {
- 'article.rst': type(cls_name, (object,), {'url': 'article.html'})}
- args['context']['static_content'] = {
- 'article.rst': type(cls_name, (object,), {'url': 'article.rst'})}
+ args["settings"] = get_settings()
+ args["source_path"] = "content"
+ args["context"]["generated_content"] = {
+ "article.rst": type(cls_name, (object,), {"url": "article.html"})
+ }
+ args["context"]["static_content"] = {
+ "article.rst": type(cls_name, (object,), {"url": "article.rst"})
+ }
- args['content'] = (
- 'A simple test, with a link to an'
+ args["content"] = (
+ "A simple test, with a link to an"
'<a href="{filename}article.rst">article</a> and its'
'<a href="{static}article.rst">source</a>'
)
- content = Page(**args).get_content('http://notmyidea.org')
+ content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
- 'A simple test, with a link to an'
+ "A simple test, with a link to an"
'<a href="http://notmyidea.org/article.html">article</a> and its'
- '<a href="http://notmyidea.org/article.rst">source</a>'
+ '<a href="http://notmyidea.org/article.rst">source</a>',
)
def test_intrasite_link_to_static_content_with_filename(self):
- """Test linking to a static resource with deprecated {filename}
- """
- cls_name = '_DummyAsset'
+ """Test linking to a static resource with deprecated {filename}"""
+ cls_name = "_DummyAsset"
args = self.page_kwargs.copy()
- args['settings'] = get_settings()
- args['source_path'] = 'content'
- args['context']['static_content'] = {
- 'poster.jpg':
- type(cls_name, (object,), {'url': 'images/poster.jpg'})}
+ args["settings"] = get_settings()
+ args["source_path"] = "content"
+ args["context"]["static_content"] = {
+ "poster.jpg": type(cls_name, (object,), {"url": "images/poster.jpg"})
+ }
- args['content'] = (
- 'A simple test, with a link to a'
+ args["content"] = (
+ "A simple test, with a link to a"
'<a href="{filename}poster.jpg">poster</a>'
)
- content = Page(**args).get_content('http://notmyidea.org')
+ content = Page(**args).get_content("http://notmyidea.org")
self.assertEqual(
content,
- 'A simple test, with a link to a'
- '<a href="http://notmyidea.org/images/poster.jpg">poster</a>'
+ "A simple test, with a link to a"
+ '<a href="http://notmyidea.org/images/poster.jpg">poster</a>',
)
def test_multiple_authors(self):
@@ -661,9 +659,11 @@ class TestPage(TestBase):
args = self.page_kwargs.copy()
content = Page(**args)
assert content.authors == [content.author]
- args['metadata'].pop('author')
- args['metadata']['authors'] = [Author('First Author', DEFAULT_CONFIG),
- Author('Second Author', DEFAULT_CONFIG)]
+ args["metadata"].pop("author")
+ args["metadata"]["authors"] = [
+ Author("First Author", DEFAULT_CONFIG),
+ Author("Second Author", DEFAULT_CONFIG),
+ ]
content = Page(**args)
assert content.authors
assert content.author == content.authors[0]
@@ -673,173 +673,184 @@ class TestArticle(TestBase):
def test_template(self):
# Articles default to article, metadata overwrites
default_article = Article(**self.page_kwargs)
- self.assertEqual('article', default_article.template)
+ self.assertEqual("article", default_article.template)
article_kwargs = self._copy_page_kwargs()
- article_kwargs['metadata']['template'] = 'custom'
+ article_kwargs["metadata"]["template"] = "custom"
custom_article = Article(**article_kwargs)
- self.assertEqual('custom', custom_article.template)
+ self.assertEqual("custom", custom_article.template)
def test_slugify_category_author(self):
settings = get_settings()
- settings['SLUG_REGEX_SUBSTITUTIONS'] = [
- (r'C#', 'csharp'),
- (r'[^\w\s-]', ''),
- (r'(?u)\A\s*', ''),
- (r'(?u)\s*\Z', ''),
- (r'[-\s]+', '-'),
+ settings["SLUG_REGEX_SUBSTITUTIONS"] = [
+ (r"C#", "csharp"),
+ (r"[^\w\s-]", ""),
+ (r"(?u)\A\s*", ""),
+ (r"(?u)\s*\Z", ""),
+ (r"[-\s]+", "-"),
]
- settings['ARTICLE_URL'] = '{author}/{category}/{slug}/'
- settings['ARTICLE_SAVE_AS'] = '{author}/{category}/{slug}/index.html'
+ settings["ARTICLE_URL"] = "{author}/{category}/{slug}/"
+ settings["ARTICLE_SAVE_AS"] = "{author}/{category}/{slug}/index.html"
article_kwargs = self._copy_page_kwargs()
- article_kwargs['metadata']['author'] = Author("O'Brien", settings)
- article_kwargs['metadata']['category'] = Category(
- 'C# & stuff', settings)
- article_kwargs['metadata']['title'] = 'fnord'
- article_kwargs['settings'] = settings
+ article_kwargs["metadata"]["author"] = Author("O'Brien", settings)
+ article_kwargs["metadata"]["category"] = Category("C# & stuff", settings)
+ article_kwargs["metadata"]["title"] = "fnord"
+ article_kwargs["settings"] = settings
article = Article(**article_kwargs)
- self.assertEqual(article.url, 'obrien/csharp-stuff/fnord/')
- self.assertEqual(
- article.save_as, 'obrien/csharp-stuff/fnord/index.html')
+ self.assertEqual(article.url, "obrien/csharp-stuff/fnord/")
+ self.assertEqual(article.save_as, "obrien/csharp-stuff/fnord/index.html")
def test_slugify_with_author_substitutions(self):
settings = get_settings()
- settings['AUTHOR_REGEX_SUBSTITUTIONS'] = [
- ('Alexander Todorov', 'atodorov'),
- ('Krasimir Tsonev', 'krasimir'),
- (r'[^\w\s-]', ''),
- (r'(?u)\A\s*', ''),
- (r'(?u)\s*\Z', ''),
- (r'[-\s]+', '-'),
+ settings["AUTHOR_REGEX_SUBSTITUTIONS"] = [
+ ("Alexander Todorov", "atodorov"),
+ ("Krasimir Tsonev", "krasimir"),
+ (r"[^\w\s-]", ""),
+ (r"(?u)\A\s*", ""),
+ (r"(?u)\s*\Z", ""),
+ (r"[-\s]+", "-"),
]
- settings['ARTICLE_URL'] = 'blog/{author}/{slug}/'
- settings['ARTICLE_SAVE_AS'] = 'blog/{author}/{slug}/index.html'
+ settings["ARTICLE_URL"] = "blog/{author}/{slug}/"
+ settings["ARTICLE_SAVE_AS"] = "blog/{author}/{slug}/index.html"
article_kwargs = self._copy_page_kwargs()
- article_kwargs['metadata']['author'] = Author('Alexander Todorov',
- settings)
- article_kwargs['metadata']['title'] = 'fnord'
- article_kwargs['settings'] = settings
+ article_kwargs["metadata"]["author"] = Author("Alexander Todorov", settings)
+ article_kwargs["metadata"]["title"] = "fnord"
+ article_kwargs["settings"] = settings
article = Article(**article_kwargs)
- self.assertEqual(article.url, 'blog/atodorov/fnord/')
- self.assertEqual(article.save_as, 'blog/atodorov/fnord/index.html')
+ self.assertEqual(article.url, "blog/atodorov/fnord/")
+ self.assertEqual(article.save_as, "blog/atodorov/fnord/index.html")
def test_slugify_category_with_dots(self):
settings = get_settings()
- settings['CATEGORY_REGEX_SUBSTITUTIONS'] = [
- ('Fedora QA', 'fedora.qa'),
+ settings["CATEGORY_REGEX_SUBSTITUTIONS"] = [
+ ("Fedora QA", "fedora.qa"),
]
- settings['ARTICLE_URL'] = '{category}/{slug}/'
+ settings["ARTICLE_URL"] = "{category}/{slug}/"
article_kwargs = self._copy_page_kwargs()
- article_kwargs['metadata']['category'] = Category('Fedora QA',
- settings)
- article_kwargs['metadata']['title'] = 'This Week in Fedora QA'
- article_kwargs['settings'] = settings
+ article_kwargs["metadata"]["category"] = Category("Fedora QA", settings)
+ article_kwargs["metadata"]["title"] = "This Week in Fedora QA"
+ article_kwargs["settings"] = settings
article = Article(**article_kwargs)
- self.assertEqual(article.url, 'fedora.qa/this-week-in-fedora-qa/')
+ self.assertEqual(article.url, "fedora.qa/this-week-in-fedora-qa/")
def test_valid_save_as_detects_breakout(self):
settings = get_settings()
article_kwargs = self._copy_page_kwargs()
- article_kwargs['metadata']['slug'] = '../foo'
- article_kwargs['settings'] = settings
+ article_kwargs["metadata"]["slug"] = "../foo"
+ article_kwargs["settings"] = settings
article = Article(**article_kwargs)
self.assertFalse(article._has_valid_save_as())
def test_valid_save_as_detects_breakout_to_root(self):
settings = get_settings()
article_kwargs = self._copy_page_kwargs()
- article_kwargs['metadata']['slug'] = '/foo'
- article_kwargs['settings'] = settings
+ article_kwargs["metadata"]["slug"] = "/foo"
+ article_kwargs["settings"] = settings
article = Article(**article_kwargs)
self.assertFalse(article._has_valid_save_as())
def test_valid_save_as_passes_valid(self):
settings = get_settings()
article_kwargs = self._copy_page_kwargs()
- article_kwargs['metadata']['slug'] = 'foo'
- article_kwargs['settings'] = settings
+ article_kwargs["metadata"]["slug"] = "foo"
+ article_kwargs["settings"] = settings
article = Article(**article_kwargs)
self.assertTrue(article._has_valid_save_as())
class TestStatic(LoggedTestCase):
-
def setUp(self):
super().setUp()
self.settings = get_settings(
- STATIC_SAVE_AS='{path}',
- STATIC_URL='{path}',
- PAGE_SAVE_AS=os.path.join('outpages', '{slug}.html'),
- PAGE_URL='outpages/{slug}.html')
+ STATIC_SAVE_AS="{path}",
+ STATIC_URL="{path}",
+ PAGE_SAVE_AS=os.path.join("outpages", "{slug}.html"),
+ PAGE_URL="outpages/{slug}.html",
+ )
self.context = get_context(self.settings)
- self.static = Static(content=None, metadata={}, settings=self.settings,
- source_path=posix_join('dir', 'foo.jpg'),
- context=self.context)
+ self.static = Static(
+ content=None,
+ metadata={},
+ settings=self.settings,
+ source_path=posix_join("dir", "foo.jpg"),
+ context=self.context,
+ )
- self.context['static_content'][self.static.source_path] = self.static
+ self.context["static_content"][self.static.source_path] = self.static
def tearDown(self):
pass
def test_attach_to_same_dir(self):
- """attach_to() overrides a static file's save_as and url.
- """
+ """attach_to() overrides a static file's save_as and url."""
page = Page(
content="fake page",
- metadata={'title': 'fakepage'},
+ metadata={"title": "fakepage"},
settings=self.settings,
- source_path=os.path.join('dir', 'fakepage.md'))
+ source_path=os.path.join("dir", "fakepage.md"),
+ )
self.static.attach_to(page)
- expected_save_as = os.path.join('outpages', 'foo.jpg')
+ expected_save_as = os.path.join("outpages", "foo.jpg")
self.assertEqual(self.static.save_as, expected_save_as)
self.assertEqual(self.static.url, path_to_url(expected_save_as))
def test_attach_to_parent_dir(self):
- """attach_to() preserves dirs inside the linking document dir.
- """
- page = Page(content="fake page", metadata={'title': 'fakepage'},
- settings=self.settings, source_path='fakepage.md')
+ """attach_to() preserves dirs inside the linking document dir."""
+ page = Page(
+ content="fake page",
+ metadata={"title": "fakepage"},
+ settings=self.settings,
+ source_path="fakepage.md",
+ )
self.static.attach_to(page)
- expected_save_as = os.path.join('outpages', 'dir', 'foo.jpg')
+ expected_save_as = os.path.join("outpages", "dir", "foo.jpg")
self.assertEqual(self.static.save_as, expected_save_as)
self.assertEqual(self.static.url, path_to_url(expected_save_as))
def test_attach_to_other_dir(self):
- """attach_to() ignores dirs outside the linking document dir.
- """
- page = Page(content="fake page",
- metadata={'title': 'fakepage'}, settings=self.settings,
- source_path=os.path.join('dir', 'otherdir', 'fakepage.md'))
+ """attach_to() ignores dirs outside the linking document dir."""
+ page = Page(
+ content="fake page",
+ metadata={"title": "fakepage"},
+ settings=self.settings,
+ source_path=os.path.join("dir", "otherdir", "fakepage.md"),
+ )
self.static.attach_to(page)
- expected_save_as = os.path.join('outpages', 'foo.jpg')
+ expected_save_as = os.path.join("outpages", "foo.jpg")
self.assertEqual(self.static.save_as, expected_save_as)
self.assertEqual(self.static.url, path_to_url(expected_save_as))
def test_attach_to_ignores_subsequent_calls(self):
- """attach_to() does nothing when called a second time.
- """
- page = Page(content="fake page",
- metadata={'title': 'fakepage'}, settings=self.settings,
- source_path=os.path.join('dir', 'fakepage.md'))
+ """attach_to() does nothing when called a second time."""
+ page = Page(
+ content="fake page",
+ metadata={"title": "fakepage"},
+ settings=self.settings,
+ source_path=os.path.join("dir", "fakepage.md"),
+ )
self.static.attach_to(page)
otherdir_settings = self.settings.copy()
- otherdir_settings.update(dict(
- PAGE_SAVE_AS=os.path.join('otherpages', '{slug}.html'),
- PAGE_URL='otherpages/{slug}.html'))
+ otherdir_settings.update(
+ dict(
+ PAGE_SAVE_AS=os.path.join("otherpages", "{slug}.html"),
+ PAGE_URL="otherpages/{slug}.html",
+ )
+ )
otherdir_page = Page(
content="other page",
- metadata={'title': 'otherpage'},
+ metadata={"title": "otherpage"},
settings=otherdir_settings,
- source_path=os.path.join('dir', 'otherpage.md'))
+ source_path=os.path.join("dir", "otherpage.md"),
+ )
self.static.attach_to(otherdir_page)
- otherdir_save_as = os.path.join('otherpages', 'foo.jpg')
+ otherdir_save_as = os.path.join("otherpages", "foo.jpg")
self.assertNotEqual(self.static.save_as, otherdir_save_as)
self.assertNotEqual(self.static.url, path_to_url(otherdir_save_as))
@@ -851,9 +862,10 @@ class TestStatic(LoggedTestCase):
page = Page(
content="fake page",
- metadata={'title': 'fakepage'},
+ metadata={"title": "fakepage"},
settings=self.settings,
- source_path=os.path.join('dir', 'fakepage.md'))
+ source_path=os.path.join("dir", "fakepage.md"),
+ )
self.static.attach_to(page)
self.assertEqual(self.static.save_as, original_save_as)
@@ -867,9 +879,10 @@ class TestStatic(LoggedTestCase):
page = Page(
content="fake page",
- metadata={'title': 'fakepage'},
+ metadata={"title": "fakepage"},
settings=self.settings,
- source_path=os.path.join('dir', 'fakepage.md'))
+ source_path=os.path.join("dir", "fakepage.md"),
+ )
self.static.attach_to(page)
self.assertEqual(self.static.save_as, self.static.source_path)
@@ -881,38 +894,41 @@ class TestStatic(LoggedTestCase):
"""
customstatic = Static(
content=None,
- metadata=dict(save_as='customfoo.jpg', url='customfoo.jpg'),
+ metadata=dict(save_as="customfoo.jpg", url="customfoo.jpg"),
settings=self.settings,
- source_path=os.path.join('dir', 'foo.jpg'),
- context=self.settings.copy())
+ source_path=os.path.join("dir", "foo.jpg"),
+ context=self.settings.copy(),
+ )
page = Page(
content="fake page",
- metadata={'title': 'fakepage'}, settings=self.settings,
- source_path=os.path.join('dir', 'fakepage.md'))
+ metadata={"title": "fakepage"},
+ settings=self.settings,
+ source_path=os.path.join("dir", "fakepage.md"),
+ )
customstatic.attach_to(page)
- self.assertEqual(customstatic.save_as, 'customfoo.jpg')
- self.assertEqual(customstatic.url, 'customfoo.jpg')
+ self.assertEqual(customstatic.save_as, "customfoo.jpg")
+ self.assertEqual(customstatic.url, "customfoo.jpg")
def test_attach_link_syntax(self):
- """{attach} link syntax triggers output path override & url replacement.
- """
+ """{attach} link syntax triggers output path override & url replacement."""
html = '<a href="{attach}../foo.jpg">link</a>'
page = Page(
content=html,
- metadata={'title': 'fakepage'},
+ metadata={"title": "fakepage"},
settings=self.settings,
- source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
- context=self.context)
- content = page.get_content('')
+ source_path=os.path.join("dir", "otherdir", "fakepage.md"),
+ context=self.context,
+ )
+ content = page.get_content("")
self.assertNotEqual(
- content, html,
- "{attach} link syntax did not trigger URL replacement.")
+ content, html, "{attach} link syntax did not trigger URL replacement."
+ )
- expected_save_as = os.path.join('outpages', 'foo.jpg')
+ expected_save_as = os.path.join("outpages", "foo.jpg")
self.assertEqual(self.static.save_as, expected_save_as)
self.assertEqual(self.static.url, path_to_url(expected_save_as))
@@ -922,11 +938,12 @@ class TestStatic(LoggedTestCase):
html = '<a href="{tag}foo">link</a>'
page = Page(
content=html,
- metadata={'title': 'fakepage'},
+ metadata={"title": "fakepage"},
settings=self.settings,
- source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
- context=self.context)
- content = page.get_content('')
+ source_path=os.path.join("dir", "otherdir", "fakepage.md"),
+ context=self.context,
+ )
+ content = page.get_content("")
self.assertNotEqual(content, html)
@@ -936,11 +953,12 @@ class TestStatic(LoggedTestCase):
html = '<a href="{category}foo">link</a>'
page = Page(
content=html,
- metadata={'title': 'fakepage'},
+ metadata={"title": "fakepage"},
settings=self.settings,
- source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
- context=self.context)
- content = page.get_content('')
+ source_path=os.path.join("dir", "otherdir", "fakepage.md"),
+ context=self.context,
+ )
+ content = page.get_content("")
self.assertNotEqual(content, html)
@@ -950,11 +968,12 @@ class TestStatic(LoggedTestCase):
html = '<a href="{author}foo">link</a>'
page = Page(
content=html,
- metadata={'title': 'fakepage'},
+ metadata={"title": "fakepage"},
settings=self.settings,
- source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
- context=self.context)
- content = page.get_content('')
+ source_path=os.path.join("dir", "otherdir", "fakepage.md"),
+ context=self.context,
+ )
+ content = page.get_content("")
self.assertNotEqual(content, html)
@@ -964,52 +983,62 @@ class TestStatic(LoggedTestCase):
html = '<a href="{index}">link</a>'
page = Page(
content=html,
- metadata={'title': 'fakepage'},
+ metadata={"title": "fakepage"},
settings=self.settings,
- source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
- context=self.context)
- content = page.get_content('')
+ source_path=os.path.join("dir", "otherdir", "fakepage.md"),
+ context=self.context,
+ )
+ content = page.get_content("")
self.assertNotEqual(content, html)
- expected_html = ('<a href="' +
- '/'.join((self.settings['SITEURL'],
- self.settings['INDEX_SAVE_AS'])) +
- '">link</a>')
+ expected_html = (
+ '<a href="'
+ + "/".join((self.settings["SITEURL"], self.settings["INDEX_SAVE_AS"]))
+ + '">link</a>'
+ )
self.assertEqual(content, expected_html)
def test_unknown_link_syntax(self):
"{unknown} link syntax should trigger warning."
html = '<a href="{unknown}foo">link</a>'
- page = Page(content=html,
- metadata={'title': 'fakepage'}, settings=self.settings,
- source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
- context=self.context)
- content = page.get_content('')
+ page = Page(
+ content=html,
+ metadata={"title": "fakepage"},
+ settings=self.settings,
+ source_path=os.path.join("dir", "otherdir", "fakepage.md"),
+ context=self.context,
+ )
+ content = page.get_content("")
self.assertEqual(content, html)
self.assertLogCountEqual(
count=1,
msg="Replacement Indicator 'unknown' not recognized, "
- "skipping replacement",
- level=logging.WARNING)
+ "skipping replacement",
+ level=logging.WARNING,
+ )
def test_link_to_unknown_file(self):
"{filename} link to unknown file should trigger warning."
html = '<a href="{filename}foo">link</a>'
- page = Page(content=html,
- metadata={'title': 'fakepage'}, settings=self.settings,
- source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
- context=self.context)
- content = page.get_content('')
+ page = Page(
+ content=html,
+ metadata={"title": "fakepage"},
+ settings=self.settings,
+ source_path=os.path.join("dir", "otherdir", "fakepage.md"),
+ context=self.context,
+ )
+ content = page.get_content("")
self.assertEqual(content, html)
self.assertLogCountEqual(
count=1,
msg="Unable to find 'foo', skipping url replacement.",
- level=logging.WARNING)
+ level=logging.WARNING,
+ )
def test_index_link_syntax_with_spaces(self):
"""{index} link syntax triggers url replacement
@@ -1018,18 +1047,20 @@ class TestStatic(LoggedTestCase):
html = '<a href="{index} ">link</a>'
page = Page(
content=html,
- metadata={'title': 'fakepage'},
+ metadata={"title": "fakepage"},
settings=self.settings,
- source_path=os.path.join('dir', 'otherdir', 'fakepage.md'),
- context=self.context)
- content = page.get_content('')
+ source_path=os.path.join("dir", "otherdir", "fakepage.md"),
+ context=self.context,
+ )
+ content = page.get_content("")
self.assertNotEqual(content, html)
- expected_html = ('<a href="' +
- '/'.join((self.settings['SITEURL'],
- self.settings['INDEX_SAVE_AS'])) +
- '">link</a>')
+ expected_html = (
+ '<a href="'
+ + "/".join((self.settings["SITEURL"], self.settings["INDEX_SAVE_AS"]))
+ + '">link</a>'
+ )
self.assertEqual(content, expected_html)
def test_not_save_as_draft(self):
@@ -1037,12 +1068,15 @@ class TestStatic(LoggedTestCase):
static = Static(
content=None,
- metadata=dict(status='draft',),
+ metadata=dict(
+ status="draft",
+ ),
settings=self.settings,
- source_path=os.path.join('dir', 'foo.jpg'),
- context=self.settings.copy())
+ source_path=os.path.join("dir", "foo.jpg"),
+ context=self.settings.copy(),
+ )
- expected_save_as = posixize_path(os.path.join('dir', 'foo.jpg'))
- self.assertEqual(static.status, 'draft')
+ expected_save_as = posixize_path(os.path.join("dir", "foo.jpg"))
+ self.assertEqual(static.status, "draft")
self.assertEqual(static.save_as, expected_save_as)
self.assertEqual(static.url, path_to_url(expected_save_as))
diff --git a/pelican/tests/test_generators.py b/pelican/tests/test_generators.py
index 05c37269..52adb2c9 100644
--- a/pelican/tests/test_generators.py
+++ b/pelican/tests/test_generators.py
@@ -4,293 +4,383 @@ from shutil import copy, rmtree
from tempfile import mkdtemp
from unittest.mock import MagicMock
-from pelican.generators import (ArticlesGenerator, Generator, PagesGenerator,
- PelicanTemplateNotFound, StaticGenerator,
- TemplatePagesGenerator)
-from pelican.tests.support import (can_symlink, get_context, get_settings,
- unittest, TestCaseWithCLocale)
+from pelican.generators import (
+ ArticlesGenerator,
+ Generator,
+ PagesGenerator,
+ PelicanTemplateNotFound,
+ StaticGenerator,
+ TemplatePagesGenerator,
+)
+from pelican.tests.support import (
+ can_symlink,
+ get_context,
+ get_settings,
+ unittest,
+ TestCaseWithCLocale,
+)
from pelican.writers import Writer
CUR_DIR = os.path.dirname(__file__)
-CONTENT_DIR = os.path.join(CUR_DIR, 'content')
+CONTENT_DIR = os.path.join(CUR_DIR, "content")
class TestGenerator(TestCaseWithCLocale):
def setUp(self):
super().setUp()
self.settings = get_settings()
- self.settings['READERS'] = {'asc': None}
- self.generator = Generator(self.settings.copy(), self.settings,
- CUR_DIR, self.settings['THEME'], None)
+ self.settings["READERS"] = {"asc": None}
+ self.generator = Generator(
+ self.settings.copy(), self.settings, CUR_DIR, self.settings["THEME"], None
+ )
def test_include_path(self):
- self.settings['IGNORE_FILES'] = {'ignored1.rst', 'ignored2.rst'}
+ self.settings["IGNORE_FILES"] = {"ignored1.rst", "ignored2.rst"}
- filename = os.path.join(CUR_DIR, 'content', 'article.rst')
+ filename = os.path.join(CUR_DIR, "content", "article.rst")
include_path = self.generator._include_path
self.assertTrue(include_path(filename))
- self.assertTrue(include_path(filename, extensions=('rst',)))
- self.assertFalse(include_path(filename, extensions=('md',)))
+ self.assertTrue(include_path(filename, extensions=("rst",)))
+ self.assertFalse(include_path(filename, extensions=("md",)))
- ignored_file = os.path.join(CUR_DIR, 'content', 'ignored1.rst')
+ ignored_file = os.path.join(CUR_DIR, "content", "ignored1.rst")
self.assertFalse(include_path(ignored_file))
def test_get_files_exclude(self):
- """Test that Generator.get_files() properly excludes directories.
- """
+ """Test that Generator.get_files() properly excludes directories."""
# We use our own Generator so we can give it our own content path
generator = Generator(
context=self.settings.copy(),
settings=self.settings,
- path=os.path.join(CUR_DIR, 'nested_content'),
- theme=self.settings['THEME'], output_path=None)
+ path=os.path.join(CUR_DIR, "nested_content"),
+ theme=self.settings["THEME"],
+ output_path=None,
+ )
- filepaths = generator.get_files(paths=['maindir'])
+ filepaths = generator.get_files(paths=["maindir"])
found_files = {os.path.basename(f) for f in filepaths}
- expected_files = {'maindir.md', 'subdir.md'}
+ expected_files = {"maindir.md", "subdir.md"}
self.assertFalse(
- expected_files - found_files,
- "get_files() failed to find one or more files")
+ expected_files - found_files, "get_files() failed to find one or more files"
+ )
# Test string as `paths` argument rather than list
- filepaths = generator.get_files(paths='maindir')
+ filepaths = generator.get_files(paths="maindir")
found_files = {os.path.basename(f) for f in filepaths}
- expected_files = {'maindir.md', 'subdir.md'}
+ expected_files = {"maindir.md", "subdir.md"}
self.assertFalse(
- expected_files - found_files,
- "get_files() failed to find one or more files")
+ expected_files - found_files, "get_files() failed to find one or more files"
+ )
- filepaths = generator.get_files(paths=[''], exclude=['maindir'])
+ filepaths = generator.get_files(paths=[""], exclude=["maindir"])
found_files = {os.path.basename(f) for f in filepaths}
self.assertNotIn(
- 'maindir.md', found_files,
- "get_files() failed to exclude a top-level directory")
+ "maindir.md",
+ found_files,
+ "get_files() failed to exclude a top-level directory",
+ )
self.assertNotIn(
- 'subdir.md', found_files,
- "get_files() failed to exclude a subdir of an excluded directory")
+ "subdir.md",
+ found_files,
+ "get_files() failed to exclude a subdir of an excluded directory",
+ )
filepaths = generator.get_files(
- paths=[''],
- exclude=[os.path.join('maindir', 'subdir')])
+ paths=[""], exclude=[os.path.join("maindir", "subdir")]
+ )
found_files = {os.path.basename(f) for f in filepaths}
self.assertNotIn(
- 'subdir.md', found_files,
- "get_files() failed to exclude a subdirectory")
+ "subdir.md", found_files, "get_files() failed to exclude a subdirectory"
+ )
- filepaths = generator.get_files(paths=[''], exclude=['subdir'])
+ filepaths = generator.get_files(paths=[""], exclude=["subdir"])
found_files = {os.path.basename(f) for f in filepaths}
self.assertIn(
- 'subdir.md', found_files,
- "get_files() excluded a subdirectory by name, ignoring its path")
+ "subdir.md",
+ found_files,
+ "get_files() excluded a subdirectory by name, ignoring its path",
+ )
def test_custom_jinja_environment(self):
"""
- Test that setting the JINJA_ENVIRONMENT
- properly gets set from the settings config
+ Test that setting the JINJA_ENVIRONMENT
+ properly gets set from the settings config
"""
settings = get_settings()
- comment_start_string = 'abc'
- comment_end_string = '/abc'
- settings['JINJA_ENVIRONMENT'] = {
- 'comment_start_string': comment_start_string,
- 'comment_end_string': comment_end_string
+ comment_start_string = "abc"
+ comment_end_string = "/abc"
+ settings["JINJA_ENVIRONMENT"] = {
+ "comment_start_string": comment_start_string,
+ "comment_end_string": comment_end_string,
}
- generator = Generator(settings.copy(), settings,
- CUR_DIR, settings['THEME'], None)
- self.assertEqual(comment_start_string,
- generator.env.comment_start_string)
- self.assertEqual(comment_end_string,
- generator.env.comment_end_string)
+ generator = Generator(
+ settings.copy(), settings, CUR_DIR, settings["THEME"], None
+ )
+ self.assertEqual(comment_start_string, generator.env.comment_start_string)
+ self.assertEqual(comment_end_string, generator.env.comment_end_string)
def test_theme_overrides(self):
"""
- Test that the THEME_TEMPLATES_OVERRIDES configuration setting is
- utilized correctly in the Generator.
+ Test that the THEME_TEMPLATES_OVERRIDES configuration setting is
+ utilized correctly in the Generator.
"""
- override_dirs = (os.path.join(CUR_DIR, 'theme_overrides', 'level1'),
- os.path.join(CUR_DIR, 'theme_overrides', 'level2'))
- self.settings['THEME_TEMPLATES_OVERRIDES'] = override_dirs
+ override_dirs = (
+ os.path.join(CUR_DIR, "theme_overrides", "level1"),
+ os.path.join(CUR_DIR, "theme_overrides", "level2"),
+ )
+ self.settings["THEME_TEMPLATES_OVERRIDES"] = override_dirs
generator = Generator(
context=self.settings.copy(),
settings=self.settings,
path=CUR_DIR,
- theme=self.settings['THEME'],
- output_path=None)
+ theme=self.settings["THEME"],
+ output_path=None,
+ )
- filename = generator.get_template('article').filename
+ filename = generator.get_template("article").filename
self.assertEqual(override_dirs[0], os.path.dirname(filename))
- self.assertEqual('article.html', os.path.basename(filename))
+ self.assertEqual("article.html", os.path.basename(filename))
- filename = generator.get_template('authors').filename
+ filename = generator.get_template("authors").filename
self.assertEqual(override_dirs[1], os.path.dirname(filename))
- self.assertEqual('authors.html', os.path.basename(filename))
+ self.assertEqual("authors.html", os.path.basename(filename))
- filename = generator.get_template('taglist').filename
- self.assertEqual(os.path.join(self.settings['THEME'], 'templates'),
- os.path.dirname(filename))
+ filename = generator.get_template("taglist").filename
+ self.assertEqual(
+ os.path.join(self.settings["THEME"], "templates"), os.path.dirname(filename)
+ )
self.assertNotIn(os.path.dirname(filename), override_dirs)
- self.assertEqual('taglist.html', os.path.basename(filename))
+ self.assertEqual("taglist.html", os.path.basename(filename))
def test_simple_prefix(self):
"""
- Test `!simple` theme prefix.
+ Test `!simple` theme prefix.
"""
- filename = self.generator.get_template('!simple/authors').filename
+ filename = self.generator.get_template("!simple/authors").filename
expected_path = os.path.join(
- os.path.dirname(CUR_DIR), 'themes', 'simple', 'templates')
+ os.path.dirname(CUR_DIR), "themes", "simple", "templates"
+ )
self.assertEqual(expected_path, os.path.dirname(filename))
- self.assertEqual('authors.html', os.path.basename(filename))
+ self.assertEqual("authors.html", os.path.basename(filename))
def test_theme_prefix(self):
"""
- Test `!theme` theme prefix.
+ Test `!theme` theme prefix.
"""
- filename = self.generator.get_template('!theme/authors').filename
+ filename = self.generator.get_template("!theme/authors").filename
expected_path = os.path.join(
- os.path.dirname(CUR_DIR), 'themes', 'notmyidea', 'templates')
+ os.path.dirname(CUR_DIR), "themes", "notmyidea", "templates"
+ )
self.assertEqual(expected_path, os.path.dirname(filename))
- self.assertEqual('authors.html', os.path.basename(filename))
+ self.assertEqual("authors.html", os.path.basename(filename))
def test_bad_prefix(self):
"""
- Test unknown/bad theme prefix throws exception.
+ Test unknown/bad theme prefix throws exception.
"""
- self.assertRaises(PelicanTemplateNotFound, self.generator.get_template,
- '!UNKNOWN/authors')
+ self.assertRaises(
+ PelicanTemplateNotFound, self.generator.get_template, "!UNKNOWN/authors"
+ )
class TestArticlesGenerator(unittest.TestCase):
-
@classmethod
def setUpClass(cls):
settings = get_settings()
- settings['DEFAULT_CATEGORY'] = 'Default'
- settings['DEFAULT_DATE'] = (1970, 1, 1)
- settings['READERS'] = {'asc': None}
- settings['CACHE_CONTENT'] = False
+ settings["DEFAULT_CATEGORY"] = "Default"
+ settings["DEFAULT_DATE"] = (1970, 1, 1)
+ settings["READERS"] = {"asc": None}
+ settings["CACHE_CONTENT"] = False
context = get_context(settings)
cls.generator = ArticlesGenerator(
- context=context, settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context,
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
cls.generator.generate_context()
cls.articles = cls.distill_articles(cls.generator.articles)
cls.drafts = cls.distill_articles(cls.generator.drafts)
cls.hidden_articles = cls.distill_articles(cls.generator.hidden_articles)
def setUp(self):
- self.temp_cache = mkdtemp(prefix='pelican_cache.')
+ self.temp_cache = mkdtemp(prefix="pelican_cache.")
def tearDown(self):
rmtree(self.temp_cache)
@staticmethod
def distill_articles(articles):
- return [[article.title, article.status, article.category.name,
- article.template] for article in articles]
+ return [
+ [article.title, article.status, article.category.name, article.template]
+ for article in articles
+ ]
def test_generate_feeds(self):
settings = get_settings()
- settings['CACHE_PATH'] = self.temp_cache
+ settings["CACHE_PATH"] = self.temp_cache
generator = ArticlesGenerator(
- context=settings, settings=settings,
- path=None, theme=settings['THEME'], output_path=None)
+ context=settings,
+ settings=settings,
+ path=None,
+ theme=settings["THEME"],
+ output_path=None,
+ )
writer = MagicMock()
generator.generate_feeds(writer)
- writer.write_feed.assert_called_with([], settings,
- 'feeds/all.atom.xml',
- 'feeds/all.atom.xml')
+ writer.write_feed.assert_called_with(
+ [], settings, "feeds/all.atom.xml", "feeds/all.atom.xml"
+ )
generator = ArticlesGenerator(
- context=settings, settings=get_settings(FEED_ALL_ATOM=None),
- path=None, theme=settings['THEME'], output_path=None)
+ context=settings,
+ settings=get_settings(FEED_ALL_ATOM=None),
+ path=None,
+ theme=settings["THEME"],
+ output_path=None,
+ )
writer = MagicMock()
generator.generate_feeds(writer)
self.assertFalse(writer.write_feed.called)
def test_generate_feeds_override_url(self):
settings = get_settings()
- settings['CACHE_PATH'] = self.temp_cache
- settings['FEED_ALL_ATOM_URL'] = 'feeds/atom/all/'
+ settings["CACHE_PATH"] = self.temp_cache
+ settings["FEED_ALL_ATOM_URL"] = "feeds/atom/all/"
generator = ArticlesGenerator(
- context=settings, settings=settings,
- path=None, theme=settings['THEME'], output_path=None)
+ context=settings,
+ settings=settings,
+ path=None,
+ theme=settings["THEME"],
+ output_path=None,
+ )
writer = MagicMock()
generator.generate_feeds(writer)
- writer.write_feed.assert_called_with([], settings,
- 'feeds/all.atom.xml',
- 'feeds/atom/all/')
+ writer.write_feed.assert_called_with(
+ [], settings, "feeds/all.atom.xml", "feeds/atom/all/"
+ )
def test_generate_context(self):
articles_expected = [
- ['Article title', 'published', 'Default', 'article'],
- ['Article with markdown and summary metadata multi', 'published',
- 'Default', 'article'],
- ['Article with markdown and nested summary metadata', 'published',
- 'Default', 'article'],
- ['Article with markdown and summary metadata single', 'published',
- 'Default', 'article'],
- ['Article with markdown containing footnotes', 'published',
- 'Default', 'article'],
- ['Article with template', 'published', 'Default', 'custom'],
- ['Metadata tags as list!', 'published', 'Default', 'article'],
- ['Rst with filename metadata', 'published', 'yeah', 'article'],
- ['One -, two --, three --- dashes!', 'published', 'Default',
- 'article'],
- ['One -, two --, three --- dashes!', 'published', 'Default',
- 'article'],
- ['Test Markdown extensions', 'published', 'Default', 'article'],
- ['Test markdown File', 'published', 'test', 'article'],
- ['Test md File', 'published', 'test', 'article'],
- ['Test mdown File', 'published', 'test', 'article'],
- ['Test metadata duplicates', 'published', 'test', 'article'],
- ['Test mkd File', 'published', 'test', 'article'],
- ['This is a super article !', 'published', 'Yeah', 'article'],
- ['This is a super article !', 'published', 'Yeah', 'article'],
- ['Article with Nonconformant HTML meta tags', 'published',
- 'Default', 'article'],
- ['This is a super article !', 'published', 'yeah', 'article'],
- ['This is a super article !', 'published', 'yeah', 'article'],
- ['This is a super article !', 'published', 'yeah', 'article'],
- ['This is a super article !', 'published', 'yeah', 'article'],
- ['This is a super article !', 'published', 'yeah', 'article'],
- ['This is a super article !', 'published', 'yeah', 'article'],
- ['This is a super article !', 'published', 'yeah', 'article'],
- ['This is a super article !', 'published', 'yeah', 'article'],
- ['This is a super article !', 'published', 'Default', 'article'],
- ['Article with an inline SVG', 'published', 'Default', 'article'],
- ['Article with markdown and empty tags', 'published', 'Default',
- 'article'],
- ['This is an article with category !', 'published', 'yeah',
- 'article'],
- ['This is an article with multiple authors!', 'published',
- 'Default', 'article'],
- ['This is an article with multiple authors!', 'published',
- 'Default', 'article'],
- ['This is an article with multiple authors in list format!',
- 'published', 'Default', 'article'],
- ['This is an article with multiple authors in lastname, '
- 'firstname format!', 'published', 'Default', 'article'],
- ['This is an article without category !', 'published', 'Default',
- 'article'],
- ['This is an article without category !', 'published',
- 'TestCategory', 'article'],
- ['An Article With Code Block To Test Typogrify Ignore',
- 'published', 'Default', 'article'],
- ['マックOS X 10.8でパイソンとVirtualenvをインストールと設定',
- 'published', '指導書', 'article'],
+ ["Article title", "published", "Default", "article"],
+ [
+ "Article with markdown and summary metadata multi",
+ "published",
+ "Default",
+ "article",
+ ],
+ [
+ "Article with markdown and nested summary metadata",
+ "published",
+ "Default",
+ "article",
+ ],
+ [
+ "Article with markdown and summary metadata single",
+ "published",
+ "Default",
+ "article",
+ ],
+ [
+ "Article with markdown containing footnotes",
+ "published",
+ "Default",
+ "article",
+ ],
+ ["Article with template", "published", "Default", "custom"],
+ ["Metadata tags as list!", "published", "Default", "article"],
+ ["Rst with filename metadata", "published", "yeah", "article"],
+ ["One -, two --, three --- dashes!", "published", "Default", "article"],
+ ["One -, two --, three --- dashes!", "published", "Default", "article"],
+ ["Test Markdown extensions", "published", "Default", "article"],
+ ["Test markdown File", "published", "test", "article"],
+ ["Test md File", "published", "test", "article"],
+ ["Test mdown File", "published", "test", "article"],
+ ["Test metadata duplicates", "published", "test", "article"],
+ ["Test mkd File", "published", "test", "article"],
+ ["This is a super article !", "published", "Yeah", "article"],
+ ["This is a super article !", "published", "Yeah", "article"],
+ [
+ "Article with Nonconformant HTML meta tags",
+ "published",
+ "Default",
+ "article",
+ ],
+ ["This is a super article !", "published", "yeah", "article"],
+ ["This is a super article !", "published", "yeah", "article"],
+ ["This is a super article !", "published", "yeah", "article"],
+ ["This is a super article !", "published", "yeah", "article"],
+ ["This is a super article !", "published", "yeah", "article"],
+ ["This is a super article !", "published", "yeah", "article"],
+ ["This is a super article !", "published", "yeah", "article"],
+ ["This is a super article !", "published", "yeah", "article"],
+ ["This is a super article !", "published", "Default", "article"],
+ ["Article with an inline SVG", "published", "Default", "article"],
+ ["Article with markdown and empty tags", "published", "Default", "article"],
+ ["This is an article with category !", "published", "yeah", "article"],
+ [
+ "This is an article with multiple authors!",
+ "published",
+ "Default",
+ "article",
+ ],
+ [
+ "This is an article with multiple authors!",
+ "published",
+ "Default",
+ "article",
+ ],
+ [
+ "This is an article with multiple authors in list format!",
+ "published",
+ "Default",
+ "article",
+ ],
+ [
+ "This is an article with multiple authors in lastname, "
+ "firstname format!",
+ "published",
+ "Default",
+ "article",
+ ],
+ [
+ "This is an article without category !",
+ "published",
+ "Default",
+ "article",
+ ],
+ [
+ "This is an article without category !",
+ "published",
+ "TestCategory",
+ "article",
+ ],
+ [
+ "An Article With Code Block To Test Typogrify Ignore",
+ "published",
+ "Default",
+ "article",
+ ],
+ [
+ "マックOS X 10.8でパイソンとVirtualenvをインストールと設定",
+ "published",
+ "指導書",
+ "article",
+ ],
]
self.assertEqual(sorted(articles_expected), sorted(self.articles))
def test_articles_draft(self):
draft_articles_expected = [
- ['Draft article', 'draft', 'Default', 'article'],
+ ["Draft article", "draft", "Default", "article"],
]
self.assertEqual(sorted(draft_articles_expected), sorted(self.drafts))
def test_articles_hidden(self):
hidden_articles_expected = [
- ['Hidden article', 'hidden', 'Default', 'article'],
+ ["Hidden article", "hidden", "Default", "article"],
]
self.assertEqual(sorted(hidden_articles_expected), sorted(self.hidden_articles))
@@ -301,27 +391,30 @@ class TestArticlesGenerator(unittest.TestCase):
# terms of process order will define the name for that category
categories = [cat.name for cat, _ in self.generator.categories]
categories_alternatives = (
- sorted(['Default', 'TestCategory', 'Yeah', 'test', '指導書']),
- sorted(['Default', 'TestCategory', 'yeah', 'test', '指導書']),
+ sorted(["Default", "TestCategory", "Yeah", "test", "指導書"]),
+ sorted(["Default", "TestCategory", "yeah", "test", "指導書"]),
)
self.assertIn(sorted(categories), categories_alternatives)
# test for slug
categories = [cat.slug for cat, _ in self.generator.categories]
- categories_expected = ['default', 'testcategory', 'yeah', 'test',
- 'zhi-dao-shu']
+ categories_expected = ["default", "testcategory", "yeah", "test", "zhi-dao-shu"]
self.assertEqual(sorted(categories), sorted(categories_expected))
def test_do_not_use_folder_as_category(self):
settings = get_settings()
- settings['DEFAULT_CATEGORY'] = 'Default'
- settings['DEFAULT_DATE'] = (1970, 1, 1)
- settings['USE_FOLDER_AS_CATEGORY'] = False
- settings['CACHE_PATH'] = self.temp_cache
- settings['READERS'] = {'asc': None}
+ settings["DEFAULT_CATEGORY"] = "Default"
+ settings["DEFAULT_DATE"] = (1970, 1, 1)
+ settings["USE_FOLDER_AS_CATEGORY"] = False
+ settings["CACHE_PATH"] = self.temp_cache
+ settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = ArticlesGenerator(
- context=context, settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context,
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
# test for name
# categories are grouped by slug; if two categories have the same slug
@@ -329,61 +422,79 @@ class TestArticlesGenerator(unittest.TestCase):
# terms of process order will define the name for that category
categories = [cat.name for cat, _ in generator.categories]
categories_alternatives = (
- sorted(['Default', 'Yeah', 'test', '指導書']),
- sorted(['Default', 'yeah', 'test', '指導書']),
+ sorted(["Default", "Yeah", "test", "指導書"]),
+ sorted(["Default", "yeah", "test", "指導書"]),
)
self.assertIn(sorted(categories), categories_alternatives)
# test for slug
categories = [cat.slug for cat, _ in generator.categories]
- categories_expected = ['default', 'yeah', 'test', 'zhi-dao-shu']
+ categories_expected = ["default", "yeah", "test", "zhi-dao-shu"]
self.assertEqual(sorted(categories), sorted(categories_expected))
def test_direct_templates_save_as_url_default(self):
-
settings = get_settings()
- settings['CACHE_PATH'] = self.temp_cache
+ settings["CACHE_PATH"] = self.temp_cache
context = get_context(settings)
generator = ArticlesGenerator(
- context=context, settings=settings,
- path=None, theme=settings['THEME'], output_path=None)
+ context=context,
+ settings=settings,
+ path=None,
+ theme=settings["THEME"],
+ output_path=None,
+ )
write = MagicMock()
generator.generate_direct_templates(write)
- write.assert_called_with("archives.html",
- generator.get_template("archives"), context,
- articles=generator.articles,
- dates=generator.dates, blog=True,
- template_name='archives',
- page_name='archives', url="archives.html")
+ write.assert_called_with(
+ "archives.html",
+ generator.get_template("archives"),
+ context,
+ articles=generator.articles,
+ dates=generator.dates,
+ blog=True,
+ template_name="archives",
+ page_name="archives",
+ url="archives.html",
+ )
def test_direct_templates_save_as_url_modified(self):
-
settings = get_settings()
- settings['DIRECT_TEMPLATES'] = ['archives']
- settings['ARCHIVES_SAVE_AS'] = 'archives/index.html'
- settings['ARCHIVES_URL'] = 'archives/'
- settings['CACHE_PATH'] = self.temp_cache
+ settings["DIRECT_TEMPLATES"] = ["archives"]
+ settings["ARCHIVES_SAVE_AS"] = "archives/index.html"
+ settings["ARCHIVES_URL"] = "archives/"
+ settings["CACHE_PATH"] = self.temp_cache
generator = ArticlesGenerator(
- context=settings, settings=settings,
- path=None, theme=settings['THEME'], output_path=None)
+ context=settings,
+ settings=settings,
+ path=None,
+ theme=settings["THEME"],
+ output_path=None,
+ )
write = MagicMock()
generator.generate_direct_templates(write)
- write.assert_called_with("archives/index.html",
- generator.get_template("archives"), settings,
- articles=generator.articles,
- dates=generator.dates, blog=True,
- template_name='archives',
- page_name='archives/index',
- url="archives/")
+ write.assert_called_with(
+ "archives/index.html",
+ generator.get_template("archives"),
+ settings,
+ articles=generator.articles,
+ dates=generator.dates,
+ blog=True,
+ template_name="archives",
+ page_name="archives/index",
+ url="archives/",
+ )
def test_direct_templates_save_as_false(self):
-
settings = get_settings()
- settings['DIRECT_TEMPLATES'] = ['archives']
- settings['ARCHIVES_SAVE_AS'] = False
- settings['CACHE_PATH'] = self.temp_cache
+ settings["DIRECT_TEMPLATES"] = ["archives"]
+ settings["ARCHIVES_SAVE_AS"] = False
+ settings["CACHE_PATH"] = self.temp_cache
generator = ArticlesGenerator(
- context=settings, settings=settings,
- path=None, theme=settings['THEME'], output_path=None)
+ context=settings,
+ settings=settings,
+ path=None,
+ theme=settings["THEME"],
+ output_path=None,
+ )
write = MagicMock()
generator.generate_direct_templates(write)
self.assertEqual(write.call_count, 0)
@@ -392,10 +503,13 @@ class TestArticlesGenerator(unittest.TestCase):
"""
Custom template articles get the field but standard/unset are None
"""
- custom_template = ['Article with template', 'published', 'Default',
- 'custom']
- standard_template = ['This is a super article !', 'published', 'Yeah',
- 'article']
+ custom_template = ["Article with template", "published", "Default", "custom"]
+ standard_template = [
+ "This is a super article !",
+ "published",
+ "Yeah",
+ "article",
+ ]
self.assertIn(custom_template, self.articles)
self.assertIn(standard_template, self.articles)
@@ -403,126 +517,135 @@ class TestArticlesGenerator(unittest.TestCase):
"""Test correctness of the period_archives context values."""
settings = get_settings()
- settings['CACHE_PATH'] = self.temp_cache
+ settings["CACHE_PATH"] = self.temp_cache
# No period archives enabled:
context = get_context(settings)
generator = ArticlesGenerator(
- context=context, settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context,
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
- period_archives = generator.context['period_archives']
+ period_archives = generator.context["period_archives"]
self.assertEqual(len(period_archives.items()), 0)
# Year archives enabled:
- settings['YEAR_ARCHIVE_SAVE_AS'] = 'posts/{date:%Y}/index.html'
- settings['YEAR_ARCHIVE_URL'] = 'posts/{date:%Y}/'
+ settings["YEAR_ARCHIVE_SAVE_AS"] = "posts/{date:%Y}/index.html"
+ settings["YEAR_ARCHIVE_URL"] = "posts/{date:%Y}/"
context = get_context(settings)
generator = ArticlesGenerator(
- context=context, settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context,
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
- period_archives = generator.context['period_archives']
+ period_archives = generator.context["period_archives"]
abbreviated_archives = {
- granularity: {period['period'] for period in periods}
+ granularity: {period["period"] for period in periods}
for granularity, periods in period_archives.items()
}
- expected = {'year': {(1970,), (2010,), (2012,), (2014,)}}
+ expected = {"year": {(1970,), (2010,), (2012,), (2014,)}}
self.assertEqual(expected, abbreviated_archives)
# Month archives enabled:
- settings['MONTH_ARCHIVE_SAVE_AS'] = \
- 'posts/{date:%Y}/{date:%b}/index.html'
- settings['MONTH_ARCHIVE_URL'] = \
- 'posts/{date:%Y}/{date:%b}/'
+ settings["MONTH_ARCHIVE_SAVE_AS"] = "posts/{date:%Y}/{date:%b}/index.html"
+ settings["MONTH_ARCHIVE_URL"] = "posts/{date:%Y}/{date:%b}/"
context = get_context(settings)
generator = ArticlesGenerator(
- context=context, settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context,
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
- period_archives = generator.context['period_archives']
+ period_archives = generator.context["period_archives"]
abbreviated_archives = {
- granularity: {period['period'] for period in periods}
+ granularity: {period["period"] for period in periods}
for granularity, periods in period_archives.items()
}
expected = {
- 'year': {(1970,), (2010,), (2012,), (2014,)},
- 'month': {
- (1970, 'January'),
- (2010, 'December'),
- (2012, 'December'),
- (2012, 'November'),
- (2012, 'October'),
- (2014, 'February'),
+ "year": {(1970,), (2010,), (2012,), (2014,)},
+ "month": {
+ (1970, "January"),
+ (2010, "December"),
+ (2012, "December"),
+ (2012, "November"),
+ (2012, "October"),
+ (2014, "February"),
},
}
self.assertEqual(expected, abbreviated_archives)
# Day archives enabled:
- settings['DAY_ARCHIVE_SAVE_AS'] = \
- 'posts/{date:%Y}/{date:%b}/{date:%d}/index.html'
- settings['DAY_ARCHIVE_URL'] = \
- 'posts/{date:%Y}/{date:%b}/{date:%d}/'
+ settings[
+ "DAY_ARCHIVE_SAVE_AS"
+ ] = "posts/{date:%Y}/{date:%b}/{date:%d}/index.html"
+ settings["DAY_ARCHIVE_URL"] = "posts/{date:%Y}/{date:%b}/{date:%d}/"
context = get_context(settings)
generator = ArticlesGenerator(
- context=context, settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context,
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
- period_archives = generator.context['period_archives']
+ period_archives = generator.context["period_archives"]
abbreviated_archives = {
- granularity: {period['period'] for period in periods}
+ granularity: {period["period"] for period in periods}
for granularity, periods in period_archives.items()
}
expected = {
- 'year': {(1970,), (2010,), (2012,), (2014,)},
- 'month': {
- (1970, 'January'),
- (2010, 'December'),
- (2012, 'December'),
- (2012, 'November'),
- (2012, 'October'),
- (2014, 'February'),
+ "year": {(1970,), (2010,), (2012,), (2014,)},
+ "month": {
+ (1970, "January"),
+ (2010, "December"),
+ (2012, "December"),
+ (2012, "November"),
+ (2012, "October"),
+ (2014, "February"),
},
- 'day': {
- (1970, 'January', 1),
- (2010, 'December', 2),
- (2012, 'December', 20),
- (2012, 'November', 29),
- (2012, 'October', 30),
- (2012, 'October', 31),
- (2014, 'February', 9),
+ "day": {
+ (1970, "January", 1),
+ (2010, "December", 2),
+ (2012, "December", 20),
+ (2012, "November", 29),
+ (2012, "October", 30),
+ (2012, "October", 31),
+ (2014, "February", 9),
},
}
self.assertEqual(expected, abbreviated_archives)
# Further item values tests
filtered_archives = [
- p for p in period_archives['day']
- if p['period'] == (2014, 'February', 9)
+ p for p in period_archives["day"] if p["period"] == (2014, "February", 9)
]
self.assertEqual(len(filtered_archives), 1)
sample_archive = filtered_archives[0]
- self.assertEqual(sample_archive['period_num'], (2014, 2, 9))
- self.assertEqual(
- sample_archive['save_as'], 'posts/2014/Feb/09/index.html')
- self.assertEqual(
- sample_archive['url'], 'posts/2014/Feb/09/')
+ self.assertEqual(sample_archive["period_num"], (2014, 2, 9))
+ self.assertEqual(sample_archive["save_as"], "posts/2014/Feb/09/index.html")
+ self.assertEqual(sample_archive["url"], "posts/2014/Feb/09/")
articles = [
- d for d in generator.articles if
- d.date.year == 2014 and
- d.date.month == 2 and
- d.date.day == 9
+ d
+ for d in generator.articles
+ if d.date.year == 2014 and d.date.month == 2 and d.date.day == 9
]
- self.assertEqual(len(sample_archive['articles']), len(articles))
+ self.assertEqual(len(sample_archive["articles"]), len(articles))
dates = [
- d for d in generator.dates if
- d.date.year == 2014 and
- d.date.month == 2 and
- d.date.day == 9
+ d
+ for d in generator.dates
+ if d.date.year == 2014 and d.date.month == 2 and d.date.day == 9
]
- self.assertEqual(len(sample_archive['dates']), len(dates))
- self.assertEqual(sample_archive['dates'][0].title, dates[0].title)
- self.assertEqual(sample_archive['dates'][0].date, dates[0].date)
+ self.assertEqual(len(sample_archive["dates"]), len(dates))
+ self.assertEqual(sample_archive["dates"][0].title, dates[0].title)
+ self.assertEqual(sample_archive["dates"][0].date, dates[0].date)
def test_period_in_timeperiod_archive(self):
"""
@@ -531,13 +654,17 @@ class TestArticlesGenerator(unittest.TestCase):
"""
settings = get_settings()
- settings['YEAR_ARCHIVE_SAVE_AS'] = 'posts/{date:%Y}/index.html'
- settings['YEAR_ARCHIVE_URL'] = 'posts/{date:%Y}/'
- settings['CACHE_PATH'] = self.temp_cache
+ settings["YEAR_ARCHIVE_SAVE_AS"] = "posts/{date:%Y}/index.html"
+ settings["YEAR_ARCHIVE_URL"] = "posts/{date:%Y}/"
+ settings["CACHE_PATH"] = self.temp_cache
context = get_context(settings)
generator = ArticlesGenerator(
- context=context, settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context,
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
write = MagicMock()
generator.generate_period_archives(write)
@@ -547,196 +674,257 @@ class TestArticlesGenerator(unittest.TestCase):
# among other things it must have at least been called with this
context["period"] = (1970,)
context["period_num"] = (1970,)
- write.assert_called_with("posts/1970/index.html",
- generator.get_template("period_archives"),
- context, blog=True, articles=articles,
- dates=dates, template_name='period_archives',
- url="posts/1970/",
- all_articles=generator.articles)
+ write.assert_called_with(
+ "posts/1970/index.html",
+ generator.get_template("period_archives"),
+ context,
+ blog=True,
+ articles=articles,
+ dates=dates,
+ template_name="period_archives",
+ url="posts/1970/",
+ all_articles=generator.articles,
+ )
- settings['MONTH_ARCHIVE_SAVE_AS'] = \
- 'posts/{date:%Y}/{date:%b}/index.html'
- settings['MONTH_ARCHIVE_URL'] = \
- 'posts/{date:%Y}/{date:%b}/'
+ settings["MONTH_ARCHIVE_SAVE_AS"] = "posts/{date:%Y}/{date:%b}/index.html"
+ settings["MONTH_ARCHIVE_URL"] = "posts/{date:%Y}/{date:%b}/"
context = get_context(settings)
generator = ArticlesGenerator(
- context=context, settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
- generator.generate_context()
- write = MagicMock()
- generator.generate_period_archives(write)
- dates = [d for d in generator.dates
- if d.date.year == 1970 and d.date.month == 1]
- articles = [d for d in generator.articles
- if d.date.year == 1970 and d.date.month == 1]
- self.assertEqual(len(dates), 1)
- context["period"] = (1970, "January")
- context["period_num"] = (1970, 1)
- # among other things it must have at least been called with this
- write.assert_called_with("posts/1970/Jan/index.html",
- generator.get_template("period_archives"),
- context, blog=True, articles=articles,
- dates=dates, template_name='period_archives',
- url="posts/1970/Jan/",
- all_articles=generator.articles)
-
- settings['DAY_ARCHIVE_SAVE_AS'] = \
- 'posts/{date:%Y}/{date:%b}/{date:%d}/index.html'
- settings['DAY_ARCHIVE_URL'] = \
- 'posts/{date:%Y}/{date:%b}/{date:%d}/'
- context = get_context(settings)
- generator = ArticlesGenerator(
- context=context, settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context,
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
write = MagicMock()
generator.generate_period_archives(write)
dates = [
- d for d in generator.dates if
- d.date.year == 1970 and
- d.date.month == 1 and
- d.date.day == 1
+ d for d in generator.dates if d.date.year == 1970 and d.date.month == 1
]
articles = [
- d for d in generator.articles if
- d.date.year == 1970 and
- d.date.month == 1 and
- d.date.day == 1
+ d for d in generator.articles if d.date.year == 1970 and d.date.month == 1
+ ]
+ self.assertEqual(len(dates), 1)
+ context["period"] = (1970, "January")
+ context["period_num"] = (1970, 1)
+ # among other things it must have at least been called with this
+ write.assert_called_with(
+ "posts/1970/Jan/index.html",
+ generator.get_template("period_archives"),
+ context,
+ blog=True,
+ articles=articles,
+ dates=dates,
+ template_name="period_archives",
+ url="posts/1970/Jan/",
+ all_articles=generator.articles,
+ )
+
+ settings[
+ "DAY_ARCHIVE_SAVE_AS"
+ ] = "posts/{date:%Y}/{date:%b}/{date:%d}/index.html"
+ settings["DAY_ARCHIVE_URL"] = "posts/{date:%Y}/{date:%b}/{date:%d}/"
+ context = get_context(settings)
+ generator = ArticlesGenerator(
+ context=context,
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
+ generator.generate_context()
+ write = MagicMock()
+ generator.generate_period_archives(write)
+ dates = [
+ d
+ for d in generator.dates
+ if d.date.year == 1970 and d.date.month == 1 and d.date.day == 1
+ ]
+ articles = [
+ d
+ for d in generator.articles
+ if d.date.year == 1970 and d.date.month == 1 and d.date.day == 1
]
self.assertEqual(len(dates), 1)
context["period"] = (1970, "January", 1)
context["period_num"] = (1970, 1, 1)
# among other things it must have at least been called with this
- write.assert_called_with("posts/1970/Jan/01/index.html",
- generator.get_template("period_archives"),
- context, blog=True, articles=articles,
- dates=dates, template_name='period_archives',
- url="posts/1970/Jan/01/",
- all_articles=generator.articles)
+ write.assert_called_with(
+ "posts/1970/Jan/01/index.html",
+ generator.get_template("period_archives"),
+ context,
+ blog=True,
+ articles=articles,
+ dates=dates,
+ template_name="period_archives",
+ url="posts/1970/Jan/01/",
+ all_articles=generator.articles,
+ )
def test_nonexistent_template(self):
"""Attempt to load a non-existent template"""
settings = get_settings()
context = get_context(settings)
generator = ArticlesGenerator(
- context=context, settings=settings,
- path=None, theme=settings['THEME'], output_path=None)
+ context=context,
+ settings=settings,
+ path=None,
+ theme=settings["THEME"],
+ output_path=None,
+ )
self.assertRaises(Exception, generator.get_template, "not_a_template")
def test_generate_authors(self):
"""Check authors generation."""
authors = [author.name for author, _ in self.generator.authors]
authors_expected = sorted(
- ['Alexis Métaireau', 'Author, First', 'Author, Second',
- 'First Author', 'Second Author'])
+ [
+ "Alexis Métaireau",
+ "Author, First",
+ "Author, Second",
+ "First Author",
+ "Second Author",
+ ]
+ )
self.assertEqual(sorted(authors), authors_expected)
# test for slug
authors = [author.slug for author, _ in self.generator.authors]
- authors_expected = ['alexis-metaireau', 'author-first',
- 'author-second', 'first-author', 'second-author']
+ authors_expected = [
+ "alexis-metaireau",
+ "author-first",
+ "author-second",
+ "first-author",
+ "second-author",
+ ]
self.assertEqual(sorted(authors), sorted(authors_expected))
def test_standard_metadata_in_default_metadata(self):
settings = get_settings()
- settings['CACHE_CONTENT'] = False
- settings['DEFAULT_CATEGORY'] = 'Default'
- settings['DEFAULT_DATE'] = (1970, 1, 1)
- settings['DEFAULT_METADATA'] = (('author', 'Blogger'),
- # category will be ignored in favor of
- # DEFAULT_CATEGORY
- ('category', 'Random'),
- ('tags', 'general, untagged'))
+ settings["CACHE_CONTENT"] = False
+ settings["DEFAULT_CATEGORY"] = "Default"
+ settings["DEFAULT_DATE"] = (1970, 1, 1)
+ settings["DEFAULT_METADATA"] = (
+ ("author", "Blogger"),
+ # category will be ignored in favor of
+ # DEFAULT_CATEGORY
+ ("category", "Random"),
+ ("tags", "general, untagged"),
+ )
context = get_context(settings)
generator = ArticlesGenerator(
- context=context, settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context,
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
authors = sorted([author.name for author, _ in generator.authors])
- authors_expected = sorted(['Alexis Métaireau', 'Blogger',
- 'Author, First', 'Author, Second',
- 'First Author', 'Second Author'])
+ authors_expected = sorted(
+ [
+ "Alexis Métaireau",
+ "Blogger",
+ "Author, First",
+ "Author, Second",
+ "First Author",
+ "Second Author",
+ ]
+ )
self.assertEqual(authors, authors_expected)
- categories = sorted([category.name
- for category, _ in generator.categories])
+ categories = sorted([category.name for category, _ in generator.categories])
categories_expected = [
- sorted(['Default', 'TestCategory', 'yeah', 'test', '指導書']),
- sorted(['Default', 'TestCategory', 'Yeah', 'test', '指導書'])]
+ sorted(["Default", "TestCategory", "yeah", "test", "指導書"]),
+ sorted(["Default", "TestCategory", "Yeah", "test", "指導書"]),
+ ]
self.assertIn(categories, categories_expected)
tags = sorted([tag.name for tag in generator.tags])
- tags_expected = sorted(['bar', 'foo', 'foobar', 'general', 'untagged',
- 'パイソン', 'マック'])
+ tags_expected = sorted(
+ ["bar", "foo", "foobar", "general", "untagged", "パイソン", "マック"]
+ )
self.assertEqual(tags, tags_expected)
def test_article_order_by(self):
settings = get_settings()
- settings['DEFAULT_CATEGORY'] = 'Default'
- settings['DEFAULT_DATE'] = (1970, 1, 1)
- settings['ARTICLE_ORDER_BY'] = 'title'
+ settings["DEFAULT_CATEGORY"] = "Default"
+ settings["DEFAULT_DATE"] = (1970, 1, 1)
+ settings["ARTICLE_ORDER_BY"] = "title"
context = get_context(settings)
generator = ArticlesGenerator(
- context=context, settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context,
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
expected = [
- 'An Article With Code Block To Test Typogrify Ignore',
- 'Article title',
- 'Article with Nonconformant HTML meta tags',
- 'Article with an inline SVG',
- 'Article with markdown and empty tags',
- 'Article with markdown and nested summary metadata',
- 'Article with markdown and summary metadata multi',
- 'Article with markdown and summary metadata single',
- 'Article with markdown containing footnotes',
- 'Article with template',
- 'Metadata tags as list!',
- 'One -, two --, three --- dashes!',
- 'One -, two --, three --- dashes!',
- 'Rst with filename metadata',
- 'Test Markdown extensions',
- 'Test markdown File',
- 'Test md File',
- 'Test mdown File',
- 'Test metadata duplicates',
- 'Test mkd File',
- 'This is a super article !',
- 'This is a super article !',
- 'This is a super article !',
- 'This is a super article !',
- 'This is a super article !',
- 'This is a super article !',
- 'This is a super article !',
- 'This is a super article !',
- 'This is a super article !',
- 'This is a super article !',
- 'This is a super article !',
- 'This is an article with category !',
- ('This is an article with multiple authors in lastname, '
- 'firstname format!'),
- 'This is an article with multiple authors in list format!',
- 'This is an article with multiple authors!',
- 'This is an article with multiple authors!',
- 'This is an article without category !',
- 'This is an article without category !',
- 'マックOS X 10.8でパイソンとVirtualenvをインストールと設定']
+ "An Article With Code Block To Test Typogrify Ignore",
+ "Article title",
+ "Article with Nonconformant HTML meta tags",
+ "Article with an inline SVG",
+ "Article with markdown and empty tags",
+ "Article with markdown and nested summary metadata",
+ "Article with markdown and summary metadata multi",
+ "Article with markdown and summary metadata single",
+ "Article with markdown containing footnotes",
+ "Article with template",
+ "Metadata tags as list!",
+ "One -, two --, three --- dashes!",
+ "One -, two --, three --- dashes!",
+ "Rst with filename metadata",
+ "Test Markdown extensions",
+ "Test markdown File",
+ "Test md File",
+ "Test mdown File",
+ "Test metadata duplicates",
+ "Test mkd File",
+ "This is a super article !",
+ "This is a super article !",
+ "This is a super article !",
+ "This is a super article !",
+ "This is a super article !",
+ "This is a super article !",
+ "This is a super article !",
+ "This is a super article !",
+ "This is a super article !",
+ "This is a super article !",
+ "This is a super article !",
+ "This is an article with category !",
+ (
+ "This is an article with multiple authors in lastname, "
+ "firstname format!"
+ ),
+ "This is an article with multiple authors in list format!",
+ "This is an article with multiple authors!",
+ "This is an article with multiple authors!",
+ "This is an article without category !",
+ "This is an article without category !",
+ "マックOS X 10.8でパイソンとVirtualenvをインストールと設定",
+ ]
articles = [article.title for article in generator.articles]
self.assertEqual(articles, expected)
# reversed title
settings = get_settings()
- settings['DEFAULT_CATEGORY'] = 'Default'
- settings['DEFAULT_DATE'] = (1970, 1, 1)
- settings['ARTICLE_ORDER_BY'] = 'reversed-title'
+ settings["DEFAULT_CATEGORY"] = "Default"
+ settings["DEFAULT_DATE"] = (1970, 1, 1)
+ settings["ARTICLE_ORDER_BY"] = "reversed-title"
context = get_context(settings)
generator = ArticlesGenerator(
- context=context, settings=settings,
- path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
+ context=context,
+ settings=settings,
+ path=CONTENT_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
articles = [article.title for article in generator.articles]
@@ -750,7 +938,7 @@ class TestPageGenerator(unittest.TestCase):
# to match expected
def setUp(self):
- self.temp_cache = mkdtemp(prefix='pelican_cache.')
+ self.temp_cache = mkdtemp(prefix="pelican_cache.")
def tearDown(self):
rmtree(self.temp_cache)
@@ -760,112 +948,125 @@ class TestPageGenerator(unittest.TestCase):
def test_generate_context(self):
settings = get_settings()
- settings['CACHE_PATH'] = self.temp_cache
- settings['PAGE_PATHS'] = ['TestPages'] # relative to CUR_DIR
- settings['DEFAULT_DATE'] = (1970, 1, 1)
+ settings["CACHE_PATH"] = self.temp_cache
+ settings["PAGE_PATHS"] = ["TestPages"] # relative to CUR_DIR
+ settings["DEFAULT_DATE"] = (1970, 1, 1)
context = get_context(settings)
generator = PagesGenerator(
- context=context, settings=settings,
- path=CUR_DIR, theme=settings['THEME'], output_path=None)
+ context=context,
+ settings=settings,
+ path=CUR_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
pages = self.distill_pages(generator.pages)
hidden_pages = self.distill_pages(generator.hidden_pages)
draft_pages = self.distill_pages(generator.draft_pages)
pages_expected = [
- ['This is a test page', 'published', 'page'],
- ['This is a markdown test page', 'published', 'page'],
- ['This is a test page with a preset template', 'published',
- 'custom'],
- ['Page with a bunch of links', 'published', 'page'],
- ['Page with static links', 'published', 'page'],
- ['A Page (Test) for sorting', 'published', 'page'],
+ ["This is a test page", "published", "page"],
+ ["This is a markdown test page", "published", "page"],
+ ["This is a test page with a preset template", "published", "custom"],
+ ["Page with a bunch of links", "published", "page"],
+ ["Page with static links", "published", "page"],
+ ["A Page (Test) for sorting", "published", "page"],
]
hidden_pages_expected = [
- ['This is a test hidden page', 'hidden', 'page'],
- ['This is a markdown test hidden page', 'hidden', 'page'],
- ['This is a test hidden page with a custom template', 'hidden',
- 'custom'],
+ ["This is a test hidden page", "hidden", "page"],
+ ["This is a markdown test hidden page", "hidden", "page"],
+ ["This is a test hidden page with a custom template", "hidden", "custom"],
]
draft_pages_expected = [
- ['This is a test draft page', 'draft', 'page'],
- ['This is a markdown test draft page', 'draft', 'page'],
- ['This is a test draft page with a custom template', 'draft',
- 'custom'],
+ ["This is a test draft page", "draft", "page"],
+ ["This is a markdown test draft page", "draft", "page"],
+ ["This is a test draft page with a custom template", "draft", "custom"],
]
self.assertEqual(sorted(pages_expected), sorted(pages))
self.assertEqual(
sorted(pages_expected),
- sorted(self.distill_pages(generator.context['pages'])))
+ sorted(self.distill_pages(generator.context["pages"])),
+ )
self.assertEqual(sorted(hidden_pages_expected), sorted(hidden_pages))
self.assertEqual(sorted(draft_pages_expected), sorted(draft_pages))
self.assertEqual(
sorted(hidden_pages_expected),
- sorted(self.distill_pages(generator.context['hidden_pages'])))
+ sorted(self.distill_pages(generator.context["hidden_pages"])),
+ )
self.assertEqual(
sorted(draft_pages_expected),
- sorted(self.distill_pages(generator.context['draft_pages'])))
+ sorted(self.distill_pages(generator.context["draft_pages"])),
+ )
def test_generate_sorted(self):
settings = get_settings()
- settings['PAGE_PATHS'] = ['TestPages'] # relative to CUR_DIR
- settings['CACHE_PATH'] = self.temp_cache
- settings['DEFAULT_DATE'] = (1970, 1, 1)
+ settings["PAGE_PATHS"] = ["TestPages"] # relative to CUR_DIR
+ settings["CACHE_PATH"] = self.temp_cache
+ settings["DEFAULT_DATE"] = (1970, 1, 1)
context = get_context(settings)
# default sort (filename)
pages_expected_sorted_by_filename = [
- ['This is a test page', 'published', 'page'],
- ['This is a markdown test page', 'published', 'page'],
- ['A Page (Test) for sorting', 'published', 'page'],
- ['Page with a bunch of links', 'published', 'page'],
- ['Page with static links', 'published', 'page'],
- ['This is a test page with a preset template', 'published',
- 'custom'],
+ ["This is a test page", "published", "page"],
+ ["This is a markdown test page", "published", "page"],
+ ["A Page (Test) for sorting", "published", "page"],
+ ["Page with a bunch of links", "published", "page"],
+ ["Page with static links", "published", "page"],
+ ["This is a test page with a preset template", "published", "custom"],
]
generator = PagesGenerator(
- context=context, settings=settings,
- path=CUR_DIR, theme=settings['THEME'], output_path=None)
+ context=context,
+ settings=settings,
+ path=CUR_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
pages = self.distill_pages(generator.pages)
self.assertEqual(pages_expected_sorted_by_filename, pages)
# sort by title
pages_expected_sorted_by_title = [
- ['A Page (Test) for sorting', 'published', 'page'],
- ['Page with a bunch of links', 'published', 'page'],
- ['Page with static links', 'published', 'page'],
- ['This is a markdown test page', 'published', 'page'],
- ['This is a test page', 'published', 'page'],
- ['This is a test page with a preset template', 'published',
- 'custom'],
+ ["A Page (Test) for sorting", "published", "page"],
+ ["Page with a bunch of links", "published", "page"],
+ ["Page with static links", "published", "page"],
+ ["This is a markdown test page", "published", "page"],
+ ["This is a test page", "published", "page"],
+ ["This is a test page with a preset template", "published", "custom"],
]
- settings['PAGE_ORDER_BY'] = 'title'
+ settings["PAGE_ORDER_BY"] = "title"
context = get_context(settings)
generator = PagesGenerator(
- context=context.copy(), settings=settings,
- path=CUR_DIR, theme=settings['THEME'], output_path=None)
+ context=context.copy(),
+ settings=settings,
+ path=CUR_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
pages = self.distill_pages(generator.pages)
self.assertEqual(pages_expected_sorted_by_title, pages)
# sort by title reversed
pages_expected_sorted_by_title = [
- ['This is a test page with a preset template', 'published',
- 'custom'],
- ['This is a test page', 'published', 'page'],
- ['This is a markdown test page', 'published', 'page'],
- ['Page with static links', 'published', 'page'],
- ['Page with a bunch of links', 'published', 'page'],
- ['A Page (Test) for sorting', 'published', 'page'],
+ ["This is a test page with a preset template", "published", "custom"],
+ ["This is a test page", "published", "page"],
+ ["This is a markdown test page", "published", "page"],
+ ["Page with static links", "published", "page"],
+ ["Page with a bunch of links", "published", "page"],
+ ["A Page (Test) for sorting", "published", "page"],
]
- settings['PAGE_ORDER_BY'] = 'reversed-title'
+ settings["PAGE_ORDER_BY"] = "reversed-title"
context = get_context(settings)
generator = PagesGenerator(
- context=context, settings=settings,
- path=CUR_DIR, theme=settings['THEME'], output_path=None)
+ context=context,
+ settings=settings,
+ path=CUR_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
pages = self.distill_pages(generator.pages)
self.assertEqual(pages_expected_sorted_by_title, pages)
@@ -876,18 +1077,22 @@ class TestPageGenerator(unittest.TestCase):
are generated correctly on pages
"""
settings = get_settings()
- settings['PAGE_PATHS'] = ['TestPages'] # relative to CUR_DIR
- settings['CACHE_PATH'] = self.temp_cache
- settings['DEFAULT_DATE'] = (1970, 1, 1)
+ settings["PAGE_PATHS"] = ["TestPages"] # relative to CUR_DIR
+ settings["CACHE_PATH"] = self.temp_cache
+ settings["DEFAULT_DATE"] = (1970, 1, 1)
context = get_context(settings)
generator = PagesGenerator(
- context=context, settings=settings,
- path=CUR_DIR, theme=settings['THEME'], output_path=None)
+ context=context,
+ settings=settings,
+ path=CUR_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
pages_by_title = {p.title: p for p in generator.pages}
- test_content = pages_by_title['Page with a bunch of links'].content
+ test_content = pages_by_title["Page with a bunch of links"].content
self.assertIn('', test_content)
self.assertIn(' ', test_content)
@@ -897,80 +1102,80 @@ class TestPageGenerator(unittest.TestCase):
are included in context['static_links']
"""
settings = get_settings()
- settings['PAGE_PATHS'] = ['TestPages/page_with_static_links.md']
- settings['CACHE_PATH'] = self.temp_cache
- settings['DEFAULT_DATE'] = (1970, 1, 1)
+ settings["PAGE_PATHS"] = ["TestPages/page_with_static_links.md"]
+ settings["CACHE_PATH"] = self.temp_cache
+ settings["DEFAULT_DATE"] = (1970, 1, 1)
context = get_context(settings)
generator = PagesGenerator(
- context=context, settings=settings,
- path=CUR_DIR, theme=settings['THEME'], output_path=None)
+ context=context,
+ settings=settings,
+ path=CUR_DIR,
+ theme=settings["THEME"],
+ output_path=None,
+ )
generator.generate_context()
- self.assertIn('pelican/tests/TestPages/image0.jpg',
- context['static_links'])
- self.assertIn('pelican/tests/TestPages/image1.jpg',
- context['static_links'])
+ self.assertIn("pelican/tests/TestPages/image0.jpg", context["static_links"])
+ self.assertIn("pelican/tests/TestPages/image1.jpg", context["static_links"])
class TestTemplatePagesGenerator(TestCaseWithCLocale):
-
TEMPLATE_CONTENT = "foo: {{ foo }}"
def setUp(self):
super().setUp()
- self.temp_content = mkdtemp(prefix='pelicantests.')
- self.temp_output = mkdtemp(prefix='pelicantests.')
+ self.temp_content = mkdtemp(prefix="pelicantests.")
+ self.temp_output = mkdtemp(prefix="pelicantests.")
def tearDown(self):
rmtree(self.temp_content)
rmtree(self.temp_output)
def test_generate_output(self):
-
settings = get_settings()
- settings['STATIC_PATHS'] = ['static']
- settings['TEMPLATE_PAGES'] = {
- 'template/source.html': 'generated/file.html'
- }
+ settings["STATIC_PATHS"] = ["static"]
+ settings["TEMPLATE_PAGES"] = {"template/source.html": "generated/file.html"}
generator = TemplatePagesGenerator(
- context={'foo': 'bar'}, settings=settings,
- path=self.temp_content, theme='', output_path=self.temp_output)
+ context={"foo": "bar"},
+ settings=settings,
+ path=self.temp_content,
+ theme="",
+ output_path=self.temp_output,
+ )
# create a dummy template file
- template_dir = os.path.join(self.temp_content, 'template')
- template_path = os.path.join(template_dir, 'source.html')
+ template_dir = os.path.join(self.temp_content, "template")
+ template_path = os.path.join(template_dir, "source.html")
os.makedirs(template_dir)
- with open(template_path, 'w') as template_file:
+ with open(template_path, "w") as template_file:
template_file.write(self.TEMPLATE_CONTENT)
writer = Writer(self.temp_output, settings=settings)
generator.generate_output(writer)
- output_path = os.path.join(self.temp_output, 'generated', 'file.html')
+ output_path = os.path.join(self.temp_output, "generated", "file.html")
# output file has been generated
self.assertTrue(os.path.exists(output_path))
# output content is correct
with open(output_path) as output_file:
- self.assertEqual(output_file.read(), 'foo: bar')
+ self.assertEqual(output_file.read(), "foo: bar")
class TestStaticGenerator(unittest.TestCase):
-
def setUp(self):
- self.content_path = os.path.join(CUR_DIR, 'mixed_content')
- self.temp_content = mkdtemp(prefix='testcontent.')
- self.temp_output = mkdtemp(prefix='testoutput.')
+ self.content_path = os.path.join(CUR_DIR, "mixed_content")
+ self.temp_content = mkdtemp(prefix="testcontent.")
+ self.temp_output = mkdtemp(prefix="testoutput.")
self.settings = get_settings()
- self.settings['PATH'] = self.temp_content
- self.settings['STATIC_PATHS'] = ["static"]
- self.settings['OUTPUT_PATH'] = self.temp_output
+ self.settings["PATH"] = self.temp_content
+ self.settings["STATIC_PATHS"] = ["static"]
+ self.settings["OUTPUT_PATH"] = self.temp_output
os.mkdir(os.path.join(self.temp_content, "static"))
- self.startfile = os.path.join(self.temp_content,
- "static", "staticfile")
+ self.startfile = os.path.join(self.temp_content, "static", "staticfile")
self.endfile = os.path.join(self.temp_output, "static", "staticfile")
self.generator = StaticGenerator(
context=get_context(),
@@ -978,7 +1183,7 @@ class TestStaticGenerator(unittest.TestCase):
path=self.temp_content,
theme="",
output_path=self.temp_output,
- )
+ )
def tearDown(self):
rmtree(self.temp_content)
@@ -989,155 +1194,198 @@ class TestStaticGenerator(unittest.TestCase):
def test_theme_static_paths_dirs(self):
"""Test that StaticGenerator properly copies also files mentioned in
-        THEME_STATIC_PATHS, not just directories."""
+        THEME_STATIC_PATHS, not just directories."""
settings = get_settings(PATH=self.content_path)
context = get_context(settings, staticfiles=[])
StaticGenerator(
- context=context, settings=settings,
- path=settings['PATH'], output_path=self.temp_output,
- theme=settings['THEME']).generate_output(None)
+ context=context,
+ settings=settings,
+ path=settings["PATH"],
+ output_path=self.temp_output,
+ theme=settings["THEME"],
+ ).generate_output(None)
# The content of dirs listed in THEME_STATIC_PATHS (defaulting to
# "static") is put into the output
- self.assertTrue(os.path.isdir(os.path.join(self.temp_output,
- "theme/css/")))
- self.assertTrue(os.path.isdir(os.path.join(self.temp_output,
- "theme/fonts/")))
+ self.assertTrue(os.path.isdir(os.path.join(self.temp_output, "theme/css/")))
+ self.assertTrue(os.path.isdir(os.path.join(self.temp_output, "theme/fonts/")))
def test_theme_static_paths_files(self):
"""Test that StaticGenerator properly copies also files mentioned in
-        THEME_STATIC_PATHS, not just directories."""
+        THEME_STATIC_PATHS, not just directories."""
settings = get_settings(
PATH=self.content_path,
- THEME_STATIC_PATHS=['static/css/fonts.css', 'static/fonts/'],)
+ THEME_STATIC_PATHS=["static/css/fonts.css", "static/fonts/"],
+ )
context = get_context(settings, staticfiles=[])
StaticGenerator(
- context=context, settings=settings,
- path=settings['PATH'], output_path=self.temp_output,
- theme=settings['THEME']).generate_output(None)
+ context=context,
+ settings=settings,
+ path=settings["PATH"],
+ output_path=self.temp_output,
+ theme=settings["THEME"],
+ ).generate_output(None)
# Only the content of dirs and files listed in THEME_STATIC_PATHS are
# put into the output, not everything from static/
- self.assertFalse(os.path.isdir(os.path.join(self.temp_output,
- "theme/css/")))
- self.assertFalse(os.path.isdir(os.path.join(self.temp_output,
- "theme/fonts/")))
+ self.assertFalse(os.path.isdir(os.path.join(self.temp_output, "theme/css/")))
+ self.assertFalse(os.path.isdir(os.path.join(self.temp_output, "theme/fonts/")))
- self.assertTrue(os.path.isfile(os.path.join(
- self.temp_output, "theme/Yanone_Kaffeesatz_400.eot")))
- self.assertTrue(os.path.isfile(os.path.join(
- self.temp_output, "theme/Yanone_Kaffeesatz_400.svg")))
- self.assertTrue(os.path.isfile(os.path.join(
- self.temp_output, "theme/Yanone_Kaffeesatz_400.ttf")))
- self.assertTrue(os.path.isfile(os.path.join(
- self.temp_output, "theme/Yanone_Kaffeesatz_400.woff")))
- self.assertTrue(os.path.isfile(os.path.join(
- self.temp_output, "theme/Yanone_Kaffeesatz_400.woff2")))
- self.assertTrue(os.path.isfile(os.path.join(self.temp_output,
- "theme/font.css")))
- self.assertTrue(os.path.isfile(os.path.join(self.temp_output,
- "theme/fonts.css")))
+ self.assertTrue(
+ os.path.isfile(
+ os.path.join(self.temp_output, "theme/Yanone_Kaffeesatz_400.eot")
+ )
+ )
+ self.assertTrue(
+ os.path.isfile(
+ os.path.join(self.temp_output, "theme/Yanone_Kaffeesatz_400.svg")
+ )
+ )
+ self.assertTrue(
+ os.path.isfile(
+ os.path.join(self.temp_output, "theme/Yanone_Kaffeesatz_400.ttf")
+ )
+ )
+ self.assertTrue(
+ os.path.isfile(
+ os.path.join(self.temp_output, "theme/Yanone_Kaffeesatz_400.woff")
+ )
+ )
+ self.assertTrue(
+ os.path.isfile(
+ os.path.join(self.temp_output, "theme/Yanone_Kaffeesatz_400.woff2")
+ )
+ )
+ self.assertTrue(
+ os.path.isfile(os.path.join(self.temp_output, "theme/font.css"))
+ )
+ self.assertTrue(
+ os.path.isfile(os.path.join(self.temp_output, "theme/fonts.css"))
+ )
def test_static_excludes(self):
- """Test that StaticGenerator respects STATIC_EXCLUDES.
- """
+ """Test that StaticGenerator respects STATIC_EXCLUDES."""
settings = get_settings(
- STATIC_EXCLUDES=['subdir'],
+ STATIC_EXCLUDES=["subdir"],
PATH=self.content_path,
- STATIC_PATHS=[''],)
+ STATIC_PATHS=[""],
+ )
context = get_context(settings)
StaticGenerator(
- context=context, settings=settings,
- path=settings['PATH'], output_path=self.temp_output,
- theme=settings['THEME']).generate_context()
+ context=context,
+ settings=settings,
+ path=settings["PATH"],
+ output_path=self.temp_output,
+ theme=settings["THEME"],
+ ).generate_context()
- staticnames = [os.path.basename(c.source_path)
- for c in context['staticfiles']]
+ staticnames = [os.path.basename(c.source_path) for c in context["staticfiles"]]
self.assertNotIn(
- 'subdir_fake_image.jpg', staticnames,
- "StaticGenerator processed a file in a STATIC_EXCLUDES directory")
+ "subdir_fake_image.jpg",
+ staticnames,
+ "StaticGenerator processed a file in a STATIC_EXCLUDES directory",
+ )
self.assertIn(
- 'fake_image.jpg', staticnames,
- "StaticGenerator skipped a file that it should have included")
+ "fake_image.jpg",
+ staticnames,
+ "StaticGenerator skipped a file that it should have included",
+ )
def test_static_exclude_sources(self):
- """Test that StaticGenerator respects STATIC_EXCLUDE_SOURCES.
- """
+ """Test that StaticGenerator respects STATIC_EXCLUDE_SOURCES."""
settings = get_settings(
STATIC_EXCLUDE_SOURCES=True,
PATH=self.content_path,
- PAGE_PATHS=[''],
- STATIC_PATHS=[''],
- CACHE_CONTENT=False,)
+ PAGE_PATHS=[""],
+ STATIC_PATHS=[""],
+ CACHE_CONTENT=False,
+ )
context = get_context(settings)
for generator_class in (PagesGenerator, StaticGenerator):
generator_class(
- context=context, settings=settings,
- path=settings['PATH'], output_path=self.temp_output,
- theme=settings['THEME']).generate_context()
+ context=context,
+ settings=settings,
+ path=settings["PATH"],
+ output_path=self.temp_output,
+ theme=settings["THEME"],
+ ).generate_context()
- staticnames = [os.path.basename(c.source_path)
- for c in context['staticfiles']]
+ staticnames = [os.path.basename(c.source_path) for c in context["staticfiles"]]
self.assertFalse(
any(name.endswith(".md") for name in staticnames),
- "STATIC_EXCLUDE_SOURCES=True failed to exclude a markdown file")
+ "STATIC_EXCLUDE_SOURCES=True failed to exclude a markdown file",
+ )
settings.update(STATIC_EXCLUDE_SOURCES=False)
context = get_context(settings)
for generator_class in (PagesGenerator, StaticGenerator):
generator_class(
- context=context, settings=settings,
- path=settings['PATH'], output_path=self.temp_output,
- theme=settings['THEME']).generate_context()
+ context=context,
+ settings=settings,
+ path=settings["PATH"],
+ output_path=self.temp_output,
+ theme=settings["THEME"],
+ ).generate_context()
- staticnames = [os.path.basename(c.source_path)
- for c in context['staticfiles']]
+ staticnames = [os.path.basename(c.source_path) for c in context["staticfiles"]]
self.assertTrue(
any(name.endswith(".md") for name in staticnames),
- "STATIC_EXCLUDE_SOURCES=False failed to include a markdown file")
+ "STATIC_EXCLUDE_SOURCES=False failed to include a markdown file",
+ )
def test_static_links(self):
- """Test that StaticGenerator uses files in static_links
- """
+ """Test that StaticGenerator uses files in static_links"""
settings = get_settings(
- STATIC_EXCLUDES=['subdir'],
+ STATIC_EXCLUDES=["subdir"],
PATH=self.content_path,
- STATIC_PATHS=[],)
+ STATIC_PATHS=[],
+ )
context = get_context(settings)
- context['static_links'] |= {'short_page.md', 'subdir_fake_image.jpg'}
+ context["static_links"] |= {"short_page.md", "subdir_fake_image.jpg"}
StaticGenerator(
- context=context, settings=settings,
- path=settings['PATH'], output_path=self.temp_output,
- theme=settings['THEME']).generate_context()
+ context=context,
+ settings=settings,
+ path=settings["PATH"],
+ output_path=self.temp_output,
+ theme=settings["THEME"],
+ ).generate_context()
staticfiles_names = [
- os.path.basename(c.source_path) for c in context['staticfiles']]
+ os.path.basename(c.source_path) for c in context["staticfiles"]
+ ]
- static_content_names = [
- os.path.basename(c) for c in context['static_content']]
+ static_content_names = [os.path.basename(c) for c in context["static_content"]]
self.assertIn(
- 'short_page.md', staticfiles_names,
- "StaticGenerator skipped a file that it should have included")
+ "short_page.md",
+ staticfiles_names,
+ "StaticGenerator skipped a file that it should have included",
+ )
self.assertIn(
- 'short_page.md', static_content_names,
- "StaticGenerator skipped a file that it should have included")
+ "short_page.md",
+ static_content_names,
+ "StaticGenerator skipped a file that it should have included",
+ )
self.assertIn(
- 'subdir_fake_image.jpg', staticfiles_names,
- "StaticGenerator skipped a file that it should have included")
+ "subdir_fake_image.jpg",
+ staticfiles_names,
+ "StaticGenerator skipped a file that it should have included",
+ )
self.assertIn(
- 'subdir_fake_image.jpg', static_content_names,
- "StaticGenerator skipped a file that it should have included")
+ "subdir_fake_image.jpg",
+ static_content_names,
+ "StaticGenerator skipped a file that it should have included",
+ )
def test_copy_one_file(self):
with open(self.startfile, "w") as f:
@@ -1160,7 +1408,7 @@ class TestStaticGenerator(unittest.TestCase):
staticfile = MagicMock()
staticfile.source_path = self.startfile
staticfile.save_as = self.endfile
- self.settings['STATIC_CHECK_IF_MODIFIED'] = True
+ self.settings["STATIC_CHECK_IF_MODIFIED"] = True
with open(staticfile.source_path, "w") as f:
f.write("a")
os.mkdir(os.path.join(self.temp_output, "static"))
@@ -1181,7 +1429,7 @@ class TestStaticGenerator(unittest.TestCase):
self.assertTrue(isnewer)
def test_skip_file_when_source_is_not_newer(self):
- self.settings['STATIC_CHECK_IF_MODIFIED'] = True
+ self.settings["STATIC_CHECK_IF_MODIFIED"] = True
with open(self.startfile, "w") as f:
f.write("staticcontent")
os.mkdir(os.path.join(self.temp_output, "static"))
@@ -1201,7 +1449,7 @@ class TestStaticGenerator(unittest.TestCase):
self.assertFalse(os.path.samefile(self.startfile, self.endfile))
def test_output_file_is_linked_to_source(self):
- self.settings['STATIC_CREATE_LINKS'] = True
+ self.settings["STATIC_CREATE_LINKS"] = True
with open(self.startfile, "w") as f:
f.write("staticcontent")
self.generator.generate_context()
@@ -1209,7 +1457,7 @@ class TestStaticGenerator(unittest.TestCase):
self.assertTrue(os.path.samefile(self.startfile, self.endfile))
def test_output_file_exists_and_is_newer(self):
- self.settings['STATIC_CREATE_LINKS'] = True
+ self.settings["STATIC_CREATE_LINKS"] = True
with open(self.startfile, "w") as f:
f.write("staticcontent")
os.mkdir(os.path.join(self.temp_output, "static"))
@@ -1219,9 +1467,9 @@ class TestStaticGenerator(unittest.TestCase):
self.generator.generate_output(None)
self.assertTrue(os.path.samefile(self.startfile, self.endfile))
- @unittest.skipUnless(can_symlink(), 'No symlink privilege')
+ @unittest.skipUnless(can_symlink(), "No symlink privilege")
def test_can_symlink_when_hardlink_not_possible(self):
- self.settings['STATIC_CREATE_LINKS'] = True
+ self.settings["STATIC_CREATE_LINKS"] = True
with open(self.startfile, "w") as f:
f.write("staticcontent")
os.mkdir(os.path.join(self.temp_output, "static"))
@@ -1230,9 +1478,9 @@ class TestStaticGenerator(unittest.TestCase):
self.generator.generate_output(None)
self.assertTrue(os.path.islink(self.endfile))
- @unittest.skipUnless(can_symlink(), 'No symlink privilege')
+ @unittest.skipUnless(can_symlink(), "No symlink privilege")
def test_existing_symlink_is_considered_up_to_date(self):
- self.settings['STATIC_CREATE_LINKS'] = True
+ self.settings["STATIC_CREATE_LINKS"] = True
with open(self.startfile, "w") as f:
f.write("staticcontent")
os.mkdir(os.path.join(self.temp_output, "static"))
@@ -1243,9 +1491,9 @@ class TestStaticGenerator(unittest.TestCase):
requires_update = self.generator._file_update_required(staticfile)
self.assertFalse(requires_update)
- @unittest.skipUnless(can_symlink(), 'No symlink privilege')
+ @unittest.skipUnless(can_symlink(), "No symlink privilege")
def test_invalid_symlink_is_overwritten(self):
- self.settings['STATIC_CREATE_LINKS'] = True
+ self.settings["STATIC_CREATE_LINKS"] = True
with open(self.startfile, "w") as f:
f.write("staticcontent")
os.mkdir(os.path.join(self.temp_output, "static"))
@@ -1263,14 +1511,14 @@ class TestStaticGenerator(unittest.TestCase):
# os.path.realpath is broken on Windows before python3.8 for symlinks.
# This is a (ugly) workaround.
# see: https://bugs.python.org/issue9949
- if os.name == 'nt' and sys.version_info < (3, 8):
+ if os.name == "nt" and sys.version_info < (3, 8):
+
def get_real_path(path):
return os.readlink(path) if os.path.islink(path) else path
else:
get_real_path = os.path.realpath
- self.assertEqual(get_real_path(self.endfile),
- get_real_path(self.startfile))
+ self.assertEqual(get_real_path(self.endfile), get_real_path(self.startfile))
def test_delete_existing_file_before_mkdir(self):
with open(self.startfile, "w") as f:
@@ -1279,16 +1527,14 @@ class TestStaticGenerator(unittest.TestCase):
f.write("This file should be a directory")
self.generator.generate_context()
self.generator.generate_output(None)
- self.assertTrue(
- os.path.isdir(os.path.join(self.temp_output, "static")))
+ self.assertTrue(os.path.isdir(os.path.join(self.temp_output, "static")))
self.assertTrue(os.path.isfile(self.endfile))
class TestJinja2Environment(TestCaseWithCLocale):
-
def setUp(self):
- self.temp_content = mkdtemp(prefix='pelicantests.')
- self.temp_output = mkdtemp(prefix='pelicantests.')
+ self.temp_content = mkdtemp(prefix="pelicantests.")
+ self.temp_output = mkdtemp(prefix="pelicantests.")
def tearDown(self):
rmtree(self.temp_content)
@@ -1296,27 +1542,29 @@ class TestJinja2Environment(TestCaseWithCLocale):
def _test_jinja2_helper(self, additional_settings, content, expected):
settings = get_settings()
- settings['STATIC_PATHS'] = ['static']
- settings['TEMPLATE_PAGES'] = {
- 'template/source.html': 'generated/file.html'
- }
+ settings["STATIC_PATHS"] = ["static"]
+ settings["TEMPLATE_PAGES"] = {"template/source.html": "generated/file.html"}
settings.update(additional_settings)
generator = TemplatePagesGenerator(
- context={'foo': 'foo', 'bar': 'bar'}, settings=settings,
- path=self.temp_content, theme='', output_path=self.temp_output)
+ context={"foo": "foo", "bar": "bar"},
+ settings=settings,
+ path=self.temp_content,
+ theme="",
+ output_path=self.temp_output,
+ )
# create a dummy template file
- template_dir = os.path.join(self.temp_content, 'template')
- template_path = os.path.join(template_dir, 'source.html')
+ template_dir = os.path.join(self.temp_content, "template")
+ template_path = os.path.join(template_dir, "source.html")
os.makedirs(template_dir)
- with open(template_path, 'w') as template_file:
+ with open(template_path, "w") as template_file:
template_file.write(content)
writer = Writer(self.temp_output, settings=settings)
generator.generate_output(writer)
- output_path = os.path.join(self.temp_output, 'generated', 'file.html')
+ output_path = os.path.join(self.temp_output, "generated", "file.html")
# output file has been generated
self.assertTrue(os.path.exists(output_path))
@@ -1327,32 +1575,32 @@ class TestJinja2Environment(TestCaseWithCLocale):
def test_jinja2_filter(self):
"""JINJA_FILTERS adds custom filters to Jinja2 environment"""
- content = 'foo: {{ foo|custom_filter }}, bar: {{ bar|custom_filter }}'
- settings = {'JINJA_FILTERS': {'custom_filter': lambda x: x.upper()}}
- expected = 'foo: FOO, bar: BAR'
+ content = "foo: {{ foo|custom_filter }}, bar: {{ bar|custom_filter }}"
+ settings = {"JINJA_FILTERS": {"custom_filter": lambda x: x.upper()}}
+ expected = "foo: FOO, bar: BAR"
self._test_jinja2_helper(settings, content, expected)
def test_jinja2_test(self):
"""JINJA_TESTS adds custom tests to Jinja2 environment"""
- content = 'foo {{ foo is custom_test }}, bar {{ bar is custom_test }}'
- settings = {'JINJA_TESTS': {'custom_test': lambda x: x == 'bar'}}
- expected = 'foo False, bar True'
+ content = "foo {{ foo is custom_test }}, bar {{ bar is custom_test }}"
+ settings = {"JINJA_TESTS": {"custom_test": lambda x: x == "bar"}}
+ expected = "foo False, bar True"
self._test_jinja2_helper(settings, content, expected)
def test_jinja2_global(self):
"""JINJA_GLOBALS adds custom globals to Jinja2 environment"""
- content = '{{ custom_global }}'
- settings = {'JINJA_GLOBALS': {'custom_global': 'foobar'}}
- expected = 'foobar'
+ content = "{{ custom_global }}"
+ settings = {"JINJA_GLOBALS": {"custom_global": "foobar"}}
+ expected = "foobar"
self._test_jinja2_helper(settings, content, expected)
def test_jinja2_extension(self):
"""JINJA_ENVIRONMENT adds extensions to Jinja2 environment"""
- content = '{% set stuff = [] %}{% do stuff.append(1) %}{{ stuff }}'
- settings = {'JINJA_ENVIRONMENT': {'extensions': ['jinja2.ext.do']}}
- expected = '[1]'
+ content = "{% set stuff = [] %}{% do stuff.append(1) %}{{ stuff }}"
+ settings = {"JINJA_ENVIRONMENT": {"extensions": ["jinja2.ext.do"]}}
+ expected = "[1]"
self._test_jinja2_helper(settings, content, expected)
diff --git a/pelican/tests/test_importer.py b/pelican/tests/test_importer.py
index 870d3001..05ef5bbd 100644
--- a/pelican/tests/test_importer.py
+++ b/pelican/tests/test_importer.py
@@ -4,26 +4,35 @@ from posixpath import join as posix_join
from unittest.mock import patch
from pelican.settings import DEFAULT_CONFIG
-from pelican.tests.support import (mute, skipIfNoExecutable, temporary_folder,
- unittest, TestCaseWithCLocale)
-from pelican.tools.pelican_import import (blogger2fields, build_header,
- build_markdown_header,
- decode_wp_content,
- download_attachments, fields2pelican,
- get_attachments, tumblr2fields,
- wp2fields,
- )
+from pelican.tests.support import (
+ mute,
+ skipIfNoExecutable,
+ temporary_folder,
+ unittest,
+ TestCaseWithCLocale,
+)
+from pelican.tools.pelican_import import (
+ blogger2fields,
+ build_header,
+ build_markdown_header,
+ decode_wp_content,
+ download_attachments,
+ fields2pelican,
+ get_attachments,
+ tumblr2fields,
+ wp2fields,
+)
from pelican.utils import path_to_file_url, slugify
CUR_DIR = os.path.abspath(os.path.dirname(__file__))
-BLOGGER_XML_SAMPLE = os.path.join(CUR_DIR, 'content', 'bloggerexport.xml')
-WORDPRESS_XML_SAMPLE = os.path.join(CUR_DIR, 'content', 'wordpressexport.xml')
-WORDPRESS_ENCODED_CONTENT_SAMPLE = os.path.join(CUR_DIR,
- 'content',
- 'wordpress_content_encoded')
-WORDPRESS_DECODED_CONTENT_SAMPLE = os.path.join(CUR_DIR,
- 'content',
- 'wordpress_content_decoded')
+BLOGGER_XML_SAMPLE = os.path.join(CUR_DIR, "content", "bloggerexport.xml")
+WORDPRESS_XML_SAMPLE = os.path.join(CUR_DIR, "content", "wordpressexport.xml")
+WORDPRESS_ENCODED_CONTENT_SAMPLE = os.path.join(
+ CUR_DIR, "content", "wordpress_content_encoded"
+)
+WORDPRESS_DECODED_CONTENT_SAMPLE = os.path.join(
+ CUR_DIR, "content", "wordpress_content_decoded"
+)
try:
from bs4 import BeautifulSoup
@@ -36,10 +45,9 @@ except ImportError:
LXML = False
-@skipIfNoExecutable(['pandoc', '--version'])
-@unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module')
+@skipIfNoExecutable(["pandoc", "--version"])
+@unittest.skipUnless(BeautifulSoup, "Needs BeautifulSoup module")
class TestBloggerXmlImporter(TestCaseWithCLocale):
-
def setUp(self):
super().setUp()
self.posts = blogger2fields(BLOGGER_XML_SAMPLE)
@@ -50,16 +58,17 @@ class TestBloggerXmlImporter(TestCaseWithCLocale):
"""
test_posts = list(self.posts)
kinds = {x[8] for x in test_posts}
- self.assertEqual({'page', 'article', 'comment'}, kinds)
- page_titles = {x[0] for x in test_posts if x[8] == 'page'}
- self.assertEqual({'Test page', 'Test page 2'}, page_titles)
- article_titles = {x[0] for x in test_posts if x[8] == 'article'}
- self.assertEqual({'Black as Egypt\'s Night', 'The Steel Windpipe'},
- article_titles)
- comment_titles = {x[0] for x in test_posts if x[8] == 'comment'}
- self.assertEqual({'Mishka, always a pleasure to read your '
- 'adventures!...'},
- comment_titles)
+ self.assertEqual({"page", "article", "comment"}, kinds)
+ page_titles = {x[0] for x in test_posts if x[8] == "page"}
+ self.assertEqual({"Test page", "Test page 2"}, page_titles)
+ article_titles = {x[0] for x in test_posts if x[8] == "article"}
+ self.assertEqual(
+ {"Black as Egypt's Night", "The Steel Windpipe"}, article_titles
+ )
+ comment_titles = {x[0] for x in test_posts if x[8] == "comment"}
+ self.assertEqual(
+ {"Mishka, always a pleasure to read your " "adventures!..."}, comment_titles
+ )
def test_recognise_status_with_correct_filename(self):
"""Check that importerer outputs only statuses 'published' and 'draft',
@@ -67,24 +76,25 @@ class TestBloggerXmlImporter(TestCaseWithCLocale):
"""
test_posts = list(self.posts)
statuses = {x[7] for x in test_posts}
- self.assertEqual({'published', 'draft'}, statuses)
+ self.assertEqual({"published", "draft"}, statuses)
- draft_filenames = {x[2] for x in test_posts if x[7] == 'draft'}
+ draft_filenames = {x[2] for x in test_posts if x[7] == "draft"}
# draft filenames are id-based
- self.assertEqual({'page-4386962582497458967',
- 'post-1276418104709695660'}, draft_filenames)
+ self.assertEqual(
+ {"page-4386962582497458967", "post-1276418104709695660"}, draft_filenames
+ )
- published_filenames = {x[2] for x in test_posts if x[7] == 'published'}
+ published_filenames = {x[2] for x in test_posts if x[7] == "published"}
# published filenames are url-based, except comments
- self.assertEqual({'the-steel-windpipe',
- 'test-page',
- 'post-5590533389087749201'}, published_filenames)
+ self.assertEqual(
+ {"the-steel-windpipe", "test-page", "post-5590533389087749201"},
+ published_filenames,
+ )
-@skipIfNoExecutable(['pandoc', '--version'])
-@unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module')
+@skipIfNoExecutable(["pandoc", "--version"])
+@unittest.skipUnless(BeautifulSoup, "Needs BeautifulSoup module")
class TestWordpressXmlImporter(TestCaseWithCLocale):
-
def setUp(self):
super().setUp()
self.posts = wp2fields(WORDPRESS_XML_SAMPLE)
@@ -92,30 +102,49 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):
def test_ignore_empty_posts(self):
self.assertTrue(self.posts)
- for (title, content, fname, date, author,
- categ, tags, status, kind, format) in self.posts:
+ for (
+ title,
+ content,
+ fname,
+ date,
+ author,
+ categ,
+ tags,
+ status,
+ kind,
+ format,
+ ) in self.posts:
self.assertTrue(title.strip())
def test_recognise_page_kind(self):
- """ Check that we recognise pages in wordpress, as opposed to posts """
+ """Check that we recognise pages in wordpress, as opposed to posts"""
self.assertTrue(self.posts)
# Collect (title, filename, kind) of non-empty posts recognised as page
pages_data = []
- for (title, content, fname, date, author,
- categ, tags, status, kind, format) in self.posts:
- if kind == 'page':
+ for (
+ title,
+ content,
+ fname,
+ date,
+ author,
+ categ,
+ tags,
+ status,
+ kind,
+ format,
+ ) in self.posts:
+ if kind == "page":
pages_data.append((title, fname))
self.assertEqual(2, len(pages_data))
- self.assertEqual(('Page', 'contact'), pages_data[0])
- self.assertEqual(('Empty Page', 'empty'), pages_data[1])
+ self.assertEqual(("Page", "contact"), pages_data[0])
+ self.assertEqual(("Empty Page", "empty"), pages_data[1])
def test_dirpage_directive_for_page_kind(self):
silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Empty Page"), self.posts)
with temporary_folder() as temp:
- fname = list(silent_f2p(test_post, 'markdown',
- temp, dirpage=True))[0]
- self.assertTrue(fname.endswith('pages%sempty.md' % os.path.sep))
+ fname = list(silent_f2p(test_post, "markdown", temp, dirpage=True))[0]
+ self.assertTrue(fname.endswith("pages%sempty.md" % os.path.sep))
def test_dircat(self):
silent_f2p = mute(True)(fields2pelican)
@@ -125,14 +154,13 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):
if len(post[5]) > 0: # Has a category
test_posts.append(post)
with temporary_folder() as temp:
- fnames = list(silent_f2p(test_posts, 'markdown',
- temp, dircat=True))
- subs = DEFAULT_CONFIG['SLUG_REGEX_SUBSTITUTIONS']
+ fnames = list(silent_f2p(test_posts, "markdown", temp, dircat=True))
+ subs = DEFAULT_CONFIG["SLUG_REGEX_SUBSTITUTIONS"]
index = 0
for post in test_posts:
name = post[2]
category = slugify(post[5][0], regex_subs=subs, preserve_case=True)
- name += '.md'
+ name += ".md"
filename = os.path.join(category, name)
out_name = fnames[index]
self.assertTrue(out_name.endswith(filename))
@@ -141,9 +169,19 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):
def test_unless_custom_post_all_items_should_be_pages_or_posts(self):
self.assertTrue(self.posts)
pages_data = []
- for (title, content, fname, date, author, categ,
- tags, status, kind, format) in self.posts:
- if kind == 'page' or kind == 'article':
+ for (
+ title,
+ content,
+ fname,
+ date,
+ author,
+ categ,
+ tags,
+ status,
+ kind,
+ format,
+ ) in self.posts:
+ if kind == "page" or kind == "article":
pass
else:
pages_data.append((title, fname))
@@ -152,40 +190,45 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):
def test_recognise_custom_post_type(self):
self.assertTrue(self.custposts)
cust_data = []
- for (title, content, fname, date, author, categ,
- tags, status, kind, format) in self.custposts:
- if kind == 'article' or kind == 'page':
+ for (
+ title,
+ content,
+ fname,
+ date,
+ author,
+ categ,
+ tags,
+ status,
+ kind,
+ format,
+ ) in self.custposts:
+ if kind == "article" or kind == "page":
pass
else:
cust_data.append((title, kind))
self.assertEqual(3, len(cust_data))
+ self.assertEqual(("A custom post in category 4", "custom1"), cust_data[0])
+ self.assertEqual(("A custom post in category 5", "custom1"), cust_data[1])
self.assertEqual(
- ('A custom post in category 4', 'custom1'),
- cust_data[0])
- self.assertEqual(
- ('A custom post in category 5', 'custom1'),
- cust_data[1])
- self.assertEqual(
- ('A 2nd custom post type also in category 5', 'custom2'),
- cust_data[2])
+ ("A 2nd custom post type also in category 5", "custom2"), cust_data[2]
+ )
def test_custom_posts_put_in_own_dir(self):
silent_f2p = mute(True)(fields2pelican)
test_posts = []
for post in self.custposts:
# check post kind
- if post[8] == 'article' or post[8] == 'page':
+ if post[8] == "article" or post[8] == "page":
pass
else:
test_posts.append(post)
with temporary_folder() as temp:
- fnames = list(silent_f2p(test_posts, 'markdown',
- temp, wp_custpost=True))
+ fnames = list(silent_f2p(test_posts, "markdown", temp, wp_custpost=True))
index = 0
for post in test_posts:
name = post[2]
kind = post[8]
- name += '.md'
+ name += ".md"
filename = os.path.join(kind, name)
out_name = fnames[index]
self.assertTrue(out_name.endswith(filename))
@@ -196,20 +239,21 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):
test_posts = []
for post in self.custposts:
# check post kind
- if post[8] == 'article' or post[8] == 'page':
+ if post[8] == "article" or post[8] == "page":
pass
else:
test_posts.append(post)
with temporary_folder() as temp:
- fnames = list(silent_f2p(test_posts, 'markdown', temp,
- wp_custpost=True, dircat=True))
- subs = DEFAULT_CONFIG['SLUG_REGEX_SUBSTITUTIONS']
+ fnames = list(
+ silent_f2p(test_posts, "markdown", temp, wp_custpost=True, dircat=True)
+ )
+ subs = DEFAULT_CONFIG["SLUG_REGEX_SUBSTITUTIONS"]
index = 0
for post in test_posts:
name = post[2]
kind = post[8]
category = slugify(post[5][0], regex_subs=subs, preserve_case=True)
- name += '.md'
+ name += ".md"
filename = os.path.join(kind, category, name)
out_name = fnames[index]
self.assertTrue(out_name.endswith(filename))
@@ -221,16 +265,19 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):
test_posts = []
for post in self.custposts:
# check post kind
- if post[8] == 'page':
+ if post[8] == "page":
test_posts.append(post)
with temporary_folder() as temp:
- fnames = list(silent_f2p(test_posts, 'markdown', temp,
- wp_custpost=True, dirpage=False))
+ fnames = list(
+ silent_f2p(
+ test_posts, "markdown", temp, wp_custpost=True, dirpage=False
+ )
+ )
index = 0
for post in test_posts:
name = post[2]
- name += '.md'
- filename = os.path.join('pages', name)
+ name += ".md"
+ filename = os.path.join("pages", name)
out_name = fnames[index]
self.assertFalse(out_name.endswith(filename))
@@ -238,117 +285,114 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):
test_posts = list(self.posts)
def r(f):
- with open(f, encoding='utf-8') as infile:
+ with open(f, encoding="utf-8") as infile:
return infile.read()
+
silent_f2p = mute(True)(fields2pelican)
with temporary_folder() as temp:
-
- rst_files = (r(f) for f
- in silent_f2p(test_posts, 'markdown', temp))
-            self.assertTrue(any('<iframe' in rst for rst in rst_files))
-                        'Now with added support for <abbr title="three letter '
-                        'acronym"><span class="caps">TLA</span></abbr>.</p>\n')
+                '<p>Now with added support for <abbr title="three letter '
+                'acronym"><span class="caps">TLA</span></abbr>.</p>\n'
+            )
self.assertEqual(page.content, expected)
except ImportError:
- return unittest.skip('need the typogrify distribution')
+ return unittest.skip("need the typogrify distribution")
def test_typogrify_summary(self):
# if nothing is specified in the settings, the summary should be
# unmodified
- page = self.read_file(path='article_with_metadata.rst')
-        expected = ('<p class="first last">Multi-line metadata should be'
-                    ' supported\nas well as <em>inline'
-                    ' markup</em> and stuff to &quot;typogrify'
-                    '&quot;...</p>\n')
+ page = self.read_file(path="article_with_metadata.rst")
+ expected = (
+            '<p class="first last">Multi-line metadata should be'
+            " supported\nas well as <em>inline"
+            " markup</em> and stuff to &quot;typogrify"
+            "&quot;...</p>\n"
+ )
- self.assertEqual(page.metadata['summary'], expected)
+ self.assertEqual(page.metadata["summary"], expected)
try:
# otherwise, typogrify should be applied
- page = self.read_file(path='article_with_metadata.rst',
- TYPOGRIFY=True)
-            expected = ('<p class="first last">Multi-line metadata should be'
-                        ' supported\nas well as <em>inline'
-                        ' markup</em> and stuff to &#8220;typogrify'
-                        '&#8221;&#8230;</p>\n')
+ page = self.read_file(path="article_with_metadata.rst", TYPOGRIFY=True)
+ expected = (
+                '<p class="first last">Multi-line metadata should be'
+                " supported\nas well as <em>inline"
+                " markup</em> and stuff to &#8220;typogrify"
+                "&#8221;&#8230;</p>\n"
+ )
- self.assertEqual(page.metadata['summary'], expected)
+ self.assertEqual(page.metadata["summary"], expected)
except ImportError:
- return unittest.skip('need the typogrify distribution')
+ return unittest.skip("need the typogrify distribution")
def test_typogrify_ignore_tags(self):
try:
# typogrify should be able to ignore user specified tags,
# but tries to be clever with widont extension
- page = self.read_file(path='article.rst', TYPOGRIFY=True,
- TYPOGRIFY_IGNORE_TAGS=['p'])
-            expected = ('<p>THIS is some content. With some stuff to '
-                        '&quot;typogrify&quot;...</p>\n<p>Now with added '
-                        'support for <abbr title="three letter acronym">'
-                        'TLA</abbr>.</p>\n')
+ page = self.read_file(
+ path="article.rst", TYPOGRIFY=True, TYPOGRIFY_IGNORE_TAGS=["p"]
+ )
+ expected = (
+ "THIS is some content. With some stuff to "
+ ""typogrify"...
\nNow with added "
+ 'support for '
+ "TLA .
\n"
+ )
self.assertEqual(page.content, expected)
# typogrify should ignore code blocks by default because
# code blocks are composed inside the pre tag
- page = self.read_file(path='article_with_code_block.rst',
- TYPOGRIFY=True)
+ page = self.read_file(path="article_with_code_block.rst", TYPOGRIFY=True)
-            expected = ('<p>An article with some code</p>\n'
-                        '<div class="highlight"><pre><span></span>'
-                        '<span class="n">x</span> <span class="o">&amp;</span>'
-                        ' <span class="n">y</span>\n'
-                        '</pre></div>\n'
-                        '<p>A block quote:</p>\n<blockquote>\nx '
-                        '<span class="amp">&amp;</span> y</blockquote>\n'
-                        '<p>Normal:\nx'
-                        ' <span class="amp">&amp;</span>'
-                        ' y'
-                        '</p>\n')
+            expected = (
+                "<p>An article with some code</p>\n"
+                '<div class="highlight"><pre><span></span>'
+                '<span class="n">x</span> <span class="o">&amp;</span>'
+                ' <span class="n">y</span>\n'
+                "</pre></div>\n"
+                "<p>A block quote:</p>\n<blockquote>\nx "
+                '<span class="amp">&amp;</span> y</blockquote>\n'
+                "<p>Normal:\nx"
+                ' <span class="amp">&amp;</span>'
+                " y"
+                "</p>\n"
+            )
self.assertEqual(page.content, expected)
# instruct typogrify to also ignore blockquotes
- page = self.read_file(path='article_with_code_block.rst',
- TYPOGRIFY=True,
- TYPOGRIFY_IGNORE_TAGS=['blockquote'])
+ page = self.read_file(
+ path="article_with_code_block.rst",
+ TYPOGRIFY=True,
+ TYPOGRIFY_IGNORE_TAGS=["blockquote"],
+ )
-            expected = ('<p>An article with some code</p>\n'
-                        '<div class="highlight"><pre><span></span>'
-                        '<span class="n">x</span> <span class="o">&amp;</span>'
-                        ' <span class="n">y</span>\n'
-                        '</pre></div>\n'
-                        '<p>A block quote:</p>\n<blockquote>\nx '
-                        '&amp; y</blockquote>\n'
-                        '<p>Normal:\nx'
-                        ' <span class="amp">&amp;</span>'
-                        ' y'
-                        '</p>\n')
+            expected = (
+                "<p>An article with some code</p>\n"
+                '<div class="highlight"><pre><span></span>'
+                '<span class="n">x</span> <span class="o">&amp;</span>'
+                ' <span class="n">y</span>\n'
+                "</pre></div>\n"
+                "<p>A block quote:</p>\n<blockquote>\nx "
+                "&amp; y</blockquote>\n"
+                "<p>Normal:\nx"
+                ' <span class="amp">&amp;</span>'
+                " y"
+                "</p>\n"
+            )
self.assertEqual(page.content, expected)
except ImportError:
- return unittest.skip('need the typogrify distribution')
+ return unittest.skip("need the typogrify distribution")
except TypeError:
- return unittest.skip('need typogrify version 2.0.4 or later')
+ return unittest.skip("need typogrify version 2.0.4 or later")
def test_article_with_multiple_authors(self):
- page = self.read_file(path='article_with_multiple_authors.rst')
- expected = {
- 'authors': ['First Author', 'Second Author']
- }
+ page = self.read_file(path="article_with_multiple_authors.rst")
+ expected = {"authors": ["First Author", "Second Author"]}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_multiple_authors_semicolon(self):
- page = self.read_file(
- path='article_with_multiple_authors_semicolon.rst')
- expected = {
- 'authors': ['Author, First', 'Author, Second']
- }
+ page = self.read_file(path="article_with_multiple_authors_semicolon.rst")
+ expected = {"authors": ["Author, First", "Author, Second"]}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_multiple_authors_list(self):
- page = self.read_file(path='article_with_multiple_authors_list.rst')
- expected = {
- 'authors': ['Author, First', 'Author, Second']
- }
+ page = self.read_file(path="article_with_multiple_authors_list.rst")
+ expected = {"authors": ["Author, First", "Author, Second"]}
self.assertDictHasSubset(page.metadata, expected)
def test_default_date_formats(self):
- tuple_date = self.read_file(path='article.rst',
- DEFAULT_DATE=(2012, 5, 1))
- string_date = self.read_file(path='article.rst',
- DEFAULT_DATE='2012-05-01')
+ tuple_date = self.read_file(path="article.rst", DEFAULT_DATE=(2012, 5, 1))
+ string_date = self.read_file(path="article.rst", DEFAULT_DATE="2012-05-01")
- self.assertEqual(tuple_date.metadata['date'],
- string_date.metadata['date'])
+ self.assertEqual(tuple_date.metadata["date"], string_date.metadata["date"])
def test_parse_error(self):
# Verify that it raises an Exception, not nothing and not SystemExit or
# some such
with self.assertRaisesRegex(Exception, "underline too short"):
- self.read_file(path='../parse_error/parse_error.rst')
+ self.read_file(path="../parse_error/parse_error.rst")
def test_typogrify_dashes_config(self):
# Test default config
page = self.read_file(
- path='article_with_typogrify_dashes.rst',
+ path="article_with_typogrify_dashes.rst",
TYPOGRIFY=True,
- TYPOGRIFY_DASHES='default')
+ TYPOGRIFY_DASHES="default",
+ )
expected = "One: -; Two: —; Three: —-
\n"
expected_title = "One -, two —, three —- dashes!"
@@ -511,9 +508,10 @@ class RstReaderTest(ReaderTest):
# Test 'oldschool' variant
page = self.read_file(
- path='article_with_typogrify_dashes.rst',
+ path="article_with_typogrify_dashes.rst",
TYPOGRIFY=True,
- TYPOGRIFY_DASHES='oldschool')
+ TYPOGRIFY_DASHES="oldschool",
+ )
expected = "One: -; Two: –; Three: —
\n"
expected_title = "One -, two –, three — dashes!"
@@ -522,9 +520,10 @@ class RstReaderTest(ReaderTest):
# Test 'oldschool_inverted' variant
page = self.read_file(
- path='article_with_typogrify_dashes.rst',
+ path="article_with_typogrify_dashes.rst",
TYPOGRIFY=True,
- TYPOGRIFY_DASHES='oldschool_inverted')
+ TYPOGRIFY_DASHES="oldschool_inverted",
+ )
expected = "One: -; Two: —; Three: –
\n"
expected_title = "One -, two —, three – dashes!"
@@ -534,75 +533,73 @@ class RstReaderTest(ReaderTest):
@unittest.skipUnless(readers.Markdown, "markdown isn't installed")
class MdReaderTest(ReaderTest):
-
def test_article_with_metadata(self):
reader = readers.MarkdownReader(settings=get_settings())
- content, metadata = reader.read(
- _path('article_with_md_extension.md'))
+ content, metadata = reader.read(_path("article_with_md_extension.md"))
expected = {
- 'category': 'test',
- 'title': 'Test md File',
-            'summary': '<p>I have a lot to test</p>',
- 'date': SafeDatetime(2010, 12, 2, 10, 14),
- 'modified': SafeDatetime(2010, 12, 2, 10, 20),
- 'tags': ['foo', 'bar', 'foobar'],
+ "category": "test",
+ "title": "Test md File",
+ "summary": "I have a lot to test
",
+ "date": SafeDatetime(2010, 12, 2, 10, 14),
+ "modified": SafeDatetime(2010, 12, 2, 10, 20),
+ "tags": ["foo", "bar", "foobar"],
}
self.assertDictHasSubset(metadata, expected)
content, metadata = reader.read(
- _path('article_with_markdown_and_nonascii_summary.md'))
+ _path("article_with_markdown_and_nonascii_summary.md")
+ )
expected = {
- 'title': 'マックOS X 10.8でパイソンとVirtualenvをインストールと設定',
-            'summary': '<p>パイソンとVirtualenvをまっくでインストールする方法について明確に説明します。</p>',
- 'category': '指導書',
- 'date': SafeDatetime(2012, 12, 20),
- 'modified': SafeDatetime(2012, 12, 22),
- 'tags': ['パイソン', 'マック'],
- 'slug': 'python-virtualenv-on-mac-osx-mountain-lion-10.8',
+ "title": "マックOS X 10.8でパイソンとVirtualenvをインストールと設定",
+ "summary": "パイソンとVirtualenvをまっくでインストールする方法について明確に説明します。
",
+ "category": "指導書",
+ "date": SafeDatetime(2012, 12, 20),
+ "modified": SafeDatetime(2012, 12, 22),
+ "tags": ["パイソン", "マック"],
+ "slug": "python-virtualenv-on-mac-osx-mountain-lion-10.8",
}
self.assertDictHasSubset(metadata, expected)
def test_article_with_footnote(self):
settings = get_settings()
- ec = settings['MARKDOWN']['extension_configs']
- ec['markdown.extensions.footnotes'] = {'SEPARATOR': '-'}
+ ec = settings["MARKDOWN"]["extension_configs"]
+ ec["markdown.extensions.footnotes"] = {"SEPARATOR": "-"}
reader = readers.MarkdownReader(settings)
- content, metadata = reader.read(
- _path('article_with_markdown_and_footnote.md'))
+ content, metadata = reader.read(_path("article_with_markdown_and_footnote.md"))
expected_content = (
-            '<p>This is some content'
-            '<sup id="fnref-1"><a class="footnote-ref" href="#fn-1"'
-            '>1</a></sup>'
-            ' with some footnotes'
+            "<p>This is some content"
+            '<sup id="fnref-1"><a class="footnote-ref" href="#fn-1"'
+            ">1</a></sup>"
+            " with some footnotes"
            '<sup id="fnref-footnote"><a class="footnote-ref" '
            'href="#fn-footnote">2</a></sup></p>\n'
-
-            '</li>\n</ol>\n</div>')
+            "</li>\n</ol>\n</div>"
+        )
expected_metadata = {
- 'title': 'Article with markdown containing footnotes',
- 'summary': (
-                '<p>Summary with <em>inline markup</em> '
-                'should be supported.</p>'),
- 'date': SafeDatetime(2012, 10, 31),
- 'modified': SafeDatetime(2012, 11, 1),
- 'multiline': [
- 'Line Metadata should be handle properly.',
- 'See syntax of Meta-Data extension of '
- 'Python Markdown package:',
- 'If a line is indented by 4 or more spaces,',
- 'that line is assumed to be an additional line of the value',
- 'for the previous keyword.',
- 'A keyword may have as many lines as desired.',
- ]
+ "title": "Article with markdown containing footnotes",
+ "summary": (
+ "Summary with inline markup "
+ "should be supported.
"
+ ),
+ "date": SafeDatetime(2012, 10, 31),
+ "modified": SafeDatetime(2012, 11, 1),
+ "multiline": [
+ "Line Metadata should be handle properly.",
+ "See syntax of Meta-Data extension of " "Python Markdown package:",
+ "If a line is indented by 4 or more spaces,",
+ "that line is assumed to be an additional line of the value",
+ "for the previous keyword.",
+ "A keyword may have as many lines as desired.",
+ ],
}
self.assertEqual(content, expected_content)
self.assertDictHasSubset(metadata, expected_metadata)
@@ -611,163 +608,173 @@ class MdReaderTest(ReaderTest):
reader = readers.MarkdownReader(settings=get_settings())
# test to ensure the md file extension is being processed by the
# correct reader
- content, metadata = reader.read(
- _path('article_with_md_extension.md'))
+ content, metadata = reader.read(_path("article_with_md_extension.md"))
expected = (
"Test Markdown File Header \n"
"Used for pelican test \n"
- "The quick brown fox jumped over the lazy dog's back.
")
+ "The quick brown fox jumped over the lazy dog's back.
"
+ )
self.assertEqual(content, expected)
# test to ensure the mkd file extension is being processed by the
# correct reader
- content, metadata = reader.read(
- _path('article_with_mkd_extension.mkd'))
- expected = ("Test Markdown File Header \nUsed for pelican"
- " test \nThis is another markdown test file. Uses"
- " the mkd extension.
")
+ content, metadata = reader.read(_path("article_with_mkd_extension.mkd"))
+ expected = (
+ "Test Markdown File Header \nUsed for pelican"
+ " test \nThis is another markdown test file. Uses"
+ " the mkd extension.
"
+ )
self.assertEqual(content, expected)
# test to ensure the markdown file extension is being processed by the
# correct reader
content, metadata = reader.read(
- _path('article_with_markdown_extension.markdown'))
- expected = ("Test Markdown File Header \nUsed for pelican"
- " test \nThis is another markdown test file. Uses"
- " the markdown extension.
")
+ _path("article_with_markdown_extension.markdown")
+ )
+ expected = (
+ "Test Markdown File Header \nUsed for pelican"
+ " test \nThis is another markdown test file. Uses"
+ " the markdown extension.
"
+ )
self.assertEqual(content, expected)
# test to ensure the mdown file extension is being processed by the
# correct reader
- content, metadata = reader.read(
- _path('article_with_mdown_extension.mdown'))
- expected = ("Test Markdown File Header \nUsed for pelican"
- " test \nThis is another markdown test file. Uses"
- " the mdown extension.
")
+ content, metadata = reader.read(_path("article_with_mdown_extension.mdown"))
+ expected = (
+ "Test Markdown File Header \nUsed for pelican"
+ " test \nThis is another markdown test file. Uses"
+ " the mdown extension.
"
+ )
self.assertEqual(content, expected)
def test_article_with_markdown_markup_extension(self):
# test to ensure the markdown markup extension is being processed as
# expected
page = self.read_file(
- path='article_with_markdown_markup_extensions.md',
+ path="article_with_markdown_markup_extensions.md",
MARKDOWN={
- 'extension_configs': {
- 'markdown.extensions.toc': {},
- 'markdown.extensions.codehilite': {},
- 'markdown.extensions.extra': {}
+ "extension_configs": {
+ "markdown.extensions.toc": {},
+ "markdown.extensions.codehilite": {},
+ "markdown.extensions.extra": {},
}
- }
+ },
+ )
-        expected = ('<div class="toc">\n'
-                    '<ul>\n<li><a href="#level1">Level1</a><ul>\n'
-                    '<li><a href="#level2">Level2</a></li>\n'
-                    '</ul>\n</li>\n</ul>\n</div>\n'
-                    '<h2 id="level1">Level1</h2>\n'
-                    '<h3 id="level2">Level2</h3>')
+        expected = (
+            '<div class="toc">\n'
+            '<ul>\n<li><a href="#level1">Level1</a><ul>\n'
+            '<li><a href="#level2">Level2</a></li>\n'
+            '</ul>\n</li>\n</ul>\n</div>\n'
+            '<h2 id="level1">Level1</h2>\n'
+            '<h3 id="level2">Level2</h3>'
+        )
self.assertEqual(page.content, expected)
def test_article_with_filename_metadata(self):
page = self.read_file(
- path='2012-11-30_md_w_filename_meta#foo-bar.md',
- FILENAME_METADATA=None)
+ path="2012-11-30_md_w_filename_meta#foo-bar.md", FILENAME_METADATA=None
+ )
expected = {
- 'category': 'yeah',
- 'author': 'Alexis Métaireau',
+ "category": "yeah",
+ "author": "Alexis Métaireau",
}
self.assertDictHasSubset(page.metadata, expected)
page = self.read_file(
- path='2012-11-30_md_w_filename_meta#foo-bar.md',
- FILENAME_METADATA=r'(?P\d{4}-\d{2}-\d{2}).*')
+ path="2012-11-30_md_w_filename_meta#foo-bar.md",
+ FILENAME_METADATA=r"(?P\d{4}-\d{2}-\d{2}).*",
+ )
expected = {
- 'category': 'yeah',
- 'author': 'Alexis Métaireau',
- 'date': SafeDatetime(2012, 11, 30),
+ "category": "yeah",
+ "author": "Alexis Métaireau",
+ "date": SafeDatetime(2012, 11, 30),
}
self.assertDictHasSubset(page.metadata, expected)
page = self.read_file(
- path='2012-11-30_md_w_filename_meta#foo-bar.md',
+ path="2012-11-30_md_w_filename_meta#foo-bar.md",
FILENAME_METADATA=(
- r'(?P\d{4}-\d{2}-\d{2})'
- r'_(?P.*)'
- r'#(?P.*)-(?P.*)'))
+ r"(?P\d{4}-\d{2}-\d{2})"
+ r"_(?P.*)"
+ r"#(?P.*)-(?P.*)"
+ ),
+ )
expected = {
- 'category': 'yeah',
- 'author': 'Alexis Métaireau',
- 'date': SafeDatetime(2012, 11, 30),
- 'slug': 'md_w_filename_meta',
- 'mymeta': 'foo',
+ "category": "yeah",
+ "author": "Alexis Métaireau",
+ "date": SafeDatetime(2012, 11, 30),
+ "slug": "md_w_filename_meta",
+ "mymeta": "foo",
}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_optional_filename_metadata(self):
page = self.read_file(
- path='2012-11-30_md_w_filename_meta#foo-bar.md',
- FILENAME_METADATA=r'(?P\d{4}-\d{2}-\d{2})?')
+ path="2012-11-30_md_w_filename_meta#foo-bar.md",
+ FILENAME_METADATA=r"(?P\d{4}-\d{2}-\d{2})?",
+ )
expected = {
- 'date': SafeDatetime(2012, 11, 30),
- 'reader': 'markdown',
+ "date": SafeDatetime(2012, 11, 30),
+ "reader": "markdown",
}
self.assertDictHasSubset(page.metadata, expected)
page = self.read_file(
- path='empty.md',
- FILENAME_METADATA=r'(?P\d{4}-\d{2}-\d{2})?')
+ path="empty.md", FILENAME_METADATA=r"(?P\d{4}-\d{2}-\d{2})?"
+ )
expected = {
- 'reader': 'markdown',
+ "reader": "markdown",
}
self.assertDictHasSubset(page.metadata, expected)
- self.assertNotIn('date', page.metadata, 'Date should not be set.')
+ self.assertNotIn("date", page.metadata, "Date should not be set.")
def test_duplicate_tags_or_authors_are_removed(self):
reader = readers.MarkdownReader(settings=get_settings())
- content, metadata = reader.read(
- _path('article_with_duplicate_tags_authors.md'))
+ content, metadata = reader.read(_path("article_with_duplicate_tags_authors.md"))
expected = {
- 'tags': ['foo', 'bar', 'foobar'],
- 'authors': ['Author, First', 'Author, Second'],
+ "tags": ["foo", "bar", "foobar"],
+ "authors": ["Author, First", "Author, Second"],
}
self.assertDictHasSubset(metadata, expected)
def test_metadata_not_parsed_for_metadata(self):
settings = get_settings()
- settings['FORMATTED_FIELDS'] = ['summary']
+ settings["FORMATTED_FIELDS"] = ["summary"]
reader = readers.MarkdownReader(settings=settings)
content, metadata = reader.read(
- _path('article_with_markdown_and_nested_metadata.md'))
+ _path("article_with_markdown_and_nested_metadata.md")
+ )
expected = {
- 'title': 'Article with markdown and nested summary metadata',
- 'summary': 'Test: This metadata value looks like metadata
',
+ "title": "Article with markdown and nested summary metadata",
+ "summary": "Test: This metadata value looks like metadata
",
}
self.assertDictHasSubset(metadata, expected)
def test_empty_file(self):
reader = readers.MarkdownReader(settings=get_settings())
- content, metadata = reader.read(
- _path('empty.md'))
+ content, metadata = reader.read(_path("empty.md"))
self.assertEqual(metadata, {})
- self.assertEqual(content, '')
+ self.assertEqual(content, "")
def test_empty_file_with_bom(self):
reader = readers.MarkdownReader(settings=get_settings())
- content, metadata = reader.read(
- _path('empty_with_bom.md'))
+ content, metadata = reader.read(_path("empty_with_bom.md"))
self.assertEqual(metadata, {})
- self.assertEqual(content, '')
+ self.assertEqual(content, "")
def test_typogrify_dashes_config(self):
# Test default config
page = self.read_file(
- path='article_with_typogrify_dashes.md',
+ path="article_with_typogrify_dashes.md",
TYPOGRIFY=True,
- TYPOGRIFY_DASHES='default')
+ TYPOGRIFY_DASHES="default",
+ )
expected = "One: -; Two: —; Three: —-
"
expected_title = "One -, two —, three —- dashes!"
@@ -776,9 +783,10 @@ class MdReaderTest(ReaderTest):
# Test 'oldschool' variant
page = self.read_file(
- path='article_with_typogrify_dashes.md',
+ path="article_with_typogrify_dashes.md",
TYPOGRIFY=True,
- TYPOGRIFY_DASHES='oldschool')
+ TYPOGRIFY_DASHES="oldschool",
+ )
expected = "One: -; Two: –; Three: —
"
expected_title = "One -, two –, three — dashes!"
@@ -787,9 +795,10 @@ class MdReaderTest(ReaderTest):
# Test 'oldschool_inverted' variant
page = self.read_file(
- path='article_with_typogrify_dashes.md',
+ path="article_with_typogrify_dashes.md",
TYPOGRIFY=True,
- TYPOGRIFY_DASHES='oldschool_inverted')
+ TYPOGRIFY_DASHES="oldschool_inverted",
+ )
expected = "One: -; Two: —; Three: –
"
expected_title = "One -, two —, three – dashes!"
@@ -797,124 +806,130 @@ class MdReaderTest(ReaderTest):
self.assertEqual(page.title, expected_title)
def test_metadata_has_no_discarded_data(self):
- md_filename = 'article_with_markdown_and_empty_tags.md'
+ md_filename = "article_with_markdown_and_empty_tags.md"
- r = readers.Readers(cache_name='cache', settings=get_settings(
- CACHE_CONTENT=True))
+ r = readers.Readers(
+ cache_name="cache", settings=get_settings(CACHE_CONTENT=True)
+ )
page = r.read_file(base_path=CONTENT_PATH, path=md_filename)
- __, cached_metadata = r.get_cached_data(
- _path(md_filename), (None, None))
+ __, cached_metadata = r.get_cached_data(_path(md_filename), (None, None))
- expected = {
- 'title': 'Article with markdown and empty tags'
- }
+ expected = {"title": "Article with markdown and empty tags"}
self.assertEqual(cached_metadata, expected)
- self.assertNotIn('tags', page.metadata)
+ self.assertNotIn("tags", page.metadata)
self.assertDictHasSubset(page.metadata, expected)
class HTMLReaderTest(ReaderTest):
def test_article_with_comments(self):
- page = self.read_file(path='article_with_comments.html')
+ page = self.read_file(path="article_with_comments.html")
- self.assertEqual('''
+ self.assertEqual(
+ """
Body content
<!--  This comment is included (including extra whitespace)   -->
- ''', page.content)
+ """,
+ page.content,
+ )
def test_article_with_keywords(self):
- page = self.read_file(path='article_with_keywords.html')
+ page = self.read_file(path="article_with_keywords.html")
expected = {
- 'tags': ['foo', 'bar', 'foobar'],
+ "tags": ["foo", "bar", "foobar"],
}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_metadata(self):
- page = self.read_file(path='article_with_metadata.html')
+ page = self.read_file(path="article_with_metadata.html")
expected = {
- 'category': 'yeah',
- 'author': 'Alexis Métaireau',
- 'title': 'This is a super article !',
- 'summary': 'Summary and stuff',
- 'date': SafeDatetime(2010, 12, 2, 10, 14),
- 'tags': ['foo', 'bar', 'foobar'],
- 'custom_field': 'http://notmyidea.org',
+ "category": "yeah",
+ "author": "Alexis Métaireau",
+ "title": "This is a super article !",
+ "summary": "Summary and stuff",
+ "date": SafeDatetime(2010, 12, 2, 10, 14),
+ "tags": ["foo", "bar", "foobar"],
+ "custom_field": "http://notmyidea.org",
}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_multiple_similar_metadata_tags(self):
- page = self.read_file(path='article_with_multiple_metadata_tags.html')
+ page = self.read_file(path="article_with_multiple_metadata_tags.html")
expected = {
- 'custom_field': ['https://getpelican.com', 'https://www.eff.org'],
+ "custom_field": ["https://getpelican.com", "https://www.eff.org"],
}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_multiple_authors(self):
- page = self.read_file(path='article_with_multiple_authors.html')
- expected = {
- 'authors': ['First Author', 'Second Author']
- }
+ page = self.read_file(path="article_with_multiple_authors.html")
+ expected = {"authors": ["First Author", "Second Author"]}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_metadata_and_contents_attrib(self):
- page = self.read_file(path='article_with_metadata_and_contents.html')
+ page = self.read_file(path="article_with_metadata_and_contents.html")
expected = {
- 'category': 'yeah',
- 'author': 'Alexis Métaireau',
- 'title': 'This is a super article !',
- 'summary': 'Summary and stuff',
- 'date': SafeDatetime(2010, 12, 2, 10, 14),
- 'tags': ['foo', 'bar', 'foobar'],
- 'custom_field': 'http://notmyidea.org',
+ "category": "yeah",
+ "author": "Alexis Métaireau",
+ "title": "This is a super article !",
+ "summary": "Summary and stuff",
+ "date": SafeDatetime(2010, 12, 2, 10, 14),
+ "tags": ["foo", "bar", "foobar"],
+ "custom_field": "http://notmyidea.org",
}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_null_attributes(self):
- page = self.read_file(path='article_with_null_attributes.html')
+ page = self.read_file(path="article_with_null_attributes.html")
- self.assertEqual('''
+ self.assertEqual(
+ """
Ensure that empty attributes are copied properly.
<input name="test" disabled style="" />
- ''', page.content)
+ """,
+ page.content,
+ )
def test_article_with_attributes_containing_double_quotes(self):
- page = self.read_file(path='article_with_attributes_containing_' +
- 'double_quotes.html')
- self.assertEqual('''
+ page = self.read_file(
+ path="article_with_attributes_containing_" + "double_quotes.html"
+ )
+ self.assertEqual(
+ """
Ensure that if an attribute value contains a double quote, it is
surrounded with single quotes, otherwise with double quotes.
<span data-test="'single quoted string'">Span content</span>
<span data-test='"double quoted string"'>Span content</span>
<span data-test="string without quotes">Span content</span>
- ''', page.content)
+ """,
+ page.content,
+ )
def test_article_metadata_key_lowercase(self):
# Keys of metadata should be lowercase.
- page = self.read_file(path='article_with_uppercase_metadata.html')
+ page = self.read_file(path="article_with_uppercase_metadata.html")
# Key should be lowercase
- self.assertIn('category', page.metadata, 'Key should be lowercase.')
+ self.assertIn("category", page.metadata, "Key should be lowercase.")
# Value should keep cases
- self.assertEqual('Yeah', page.metadata.get('category'))
+ self.assertEqual("Yeah", page.metadata.get("category"))
def test_article_with_nonconformant_meta_tags(self):
- page = self.read_file(path='article_with_nonconformant_meta_tags.html')
+ page = self.read_file(path="article_with_nonconformant_meta_tags.html")
expected = {
- 'summary': 'Summary and stuff',
- 'title': 'Article with Nonconformant HTML meta tags',
+ "summary": "Summary and stuff",
+ "title": "Article with Nonconformant HTML meta tags",
}
self.assertDictHasSubset(page.metadata, expected)
def test_article_with_inline_svg(self):
- page = self.read_file(path='article_with_inline_svg.html')
+ page = self.read_file(path="article_with_inline_svg.html")
expected = {
- 'title': 'Article with an inline SVG',
+ "title": "Article with an inline SVG",
}
self.assertDictHasSubset(page.metadata, expected)
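
The FILENAME_METADATA test near the top of this file hinges on named regex groups: every named group that matches the source filename becomes a metadata key, and groups that do not participate contribute nothing. A minimal standard-library sketch of that mapping (the helper name filename_metadata is mine, not Pelican's):

    import re

    # Assumed pattern, mirroring the test: an optional named "date" group.
    FILENAME_METADATA = r"(?P<date>\d{4}-\d{2}-\d{2})?"

    def filename_metadata(stem):
        match = re.match(FILENAME_METADATA, stem)
        if match is None:
            return {}
        # Groups that did not participate in the match are None and are
        # dropped, which is why "empty.md" yields no "date" key.
        return {k: v for k, v in match.groupdict().items() if v is not None}

    print(filename_metadata("2012-11-30-filename"))  # {'date': '2012-11-30'}
    print(filename_metadata("empty"))                # {}
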
diff --git a/pelican/tests/test_rstdirectives.py b/pelican/tests/test_rstdirectives.py
index 6b733971..46ed6f49 100644
--- a/pelican/tests/test_rstdirectives.py
+++ b/pelican/tests/test_rstdirectives.py
@@ -6,11 +6,11 @@ from pelican.tests.support import unittest
class Test_abbr_role(unittest.TestCase):
def call_it(self, text):
from pelican.rstdirectives import abbr_role
+
rawtext = text
lineno = 42
- inliner = Mock(name='inliner')
- nodes, system_messages = abbr_role(
- 'abbr', rawtext, text, lineno, inliner)
+ inliner = Mock(name="inliner")
+ nodes, system_messages = abbr_role("abbr", rawtext, text, lineno, inliner)
self.assertEqual(system_messages, [])
self.assertEqual(len(nodes), 1)
return nodes[0]
@@ -18,14 +18,14 @@ class Test_abbr_role(unittest.TestCase):
def test(self):
node = self.call_it("Abbr (Abbreviation)")
self.assertEqual(node.astext(), "Abbr")
- self.assertEqual(node['explanation'], "Abbreviation")
+ self.assertEqual(node["explanation"], "Abbreviation")
def test_newlines_in_explanation(self):
node = self.call_it("CUL (See you\nlater)")
self.assertEqual(node.astext(), "CUL")
- self.assertEqual(node['explanation'], "See you\nlater")
+ self.assertEqual(node["explanation"], "See you\nlater")
def test_newlines_in_abbr(self):
node = self.call_it("US of\nA \n (USA)")
self.assertEqual(node.astext(), "US of\nA")
- self.assertEqual(node['explanation'], "USA")
+ self.assertEqual(node["explanation"], "USA")
diff --git a/pelican/tests/test_server.py b/pelican/tests/test_server.py
index 9af030f8..fd616ef7 100644
--- a/pelican/tests/test_server.py
+++ b/pelican/tests/test_server.py
@@ -17,10 +17,9 @@ class MockServer:
class TestServer(unittest.TestCase):
-
def setUp(self):
self.server = MockServer()
- self.temp_output = mkdtemp(prefix='pelicantests.')
+ self.temp_output = mkdtemp(prefix="pelicantests.")
self.old_cwd = os.getcwd()
os.chdir(self.temp_output)
@@ -29,32 +28,33 @@ class TestServer(unittest.TestCase):
rmtree(self.temp_output)
def test_get_path_that_exists(self):
- handler = ComplexHTTPRequestHandler(MockRequest(), ('0.0.0.0', 8888),
- self.server)
+ handler = ComplexHTTPRequestHandler(
+ MockRequest(), ("0.0.0.0", 8888), self.server
+ )
handler.base_path = self.temp_output
- open(os.path.join(self.temp_output, 'foo.html'), 'a').close()
- os.mkdir(os.path.join(self.temp_output, 'foo'))
- open(os.path.join(self.temp_output, 'foo', 'index.html'), 'a').close()
+ open(os.path.join(self.temp_output, "foo.html"), "a").close()
+ os.mkdir(os.path.join(self.temp_output, "foo"))
+ open(os.path.join(self.temp_output, "foo", "index.html"), "a").close()
- os.mkdir(os.path.join(self.temp_output, 'bar'))
- open(os.path.join(self.temp_output, 'bar', 'index.html'), 'a').close()
+ os.mkdir(os.path.join(self.temp_output, "bar"))
+ open(os.path.join(self.temp_output, "bar", "index.html"), "a").close()
- os.mkdir(os.path.join(self.temp_output, 'baz'))
+ os.mkdir(os.path.join(self.temp_output, "baz"))
- for suffix in ['', '/']:
+ for suffix in ["", "/"]:
# foo.html has precedence over foo/index.html
- path = handler.get_path_that_exists('foo' + suffix)
- self.assertEqual(path, 'foo.html')
+ path = handler.get_path_that_exists("foo" + suffix)
+ self.assertEqual(path, "foo.html")
# folder with index.html should return folder/index.html
- path = handler.get_path_that_exists('bar' + suffix)
- self.assertEqual(path, 'bar/index.html')
+ path = handler.get_path_that_exists("bar" + suffix)
+ self.assertEqual(path, "bar/index.html")
# folder without index.html should return same as input
- path = handler.get_path_that_exists('baz' + suffix)
- self.assertEqual(path, 'baz' + suffix)
+ path = handler.get_path_that_exists("baz" + suffix)
+ self.assertEqual(path, "baz" + suffix)
# not existing path should return None
- path = handler.get_path_that_exists('quux' + suffix)
+ path = handler.get_path_that_exists("quux" + suffix)
self.assertIsNone(path)
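
The three branches asserted above describe the dev server's lookup order. A condensed sketch of that resolution logic, reconstructed from the assertions rather than from the handler's actual code:

    import os

    def get_path_that_exists(base_path, path):
        name = path.rstrip("/")
        # 1. "foo.html" beats "foo/index.html"
        if os.path.isfile(os.path.join(base_path, name + ".html")):
            return name + ".html"
        # 2. a directory with an index serves that index
        if os.path.isfile(os.path.join(base_path, name, "index.html")):
            return name + "/index.html"
        # 3. a bare directory is returned unchanged; anything else is None
        if os.path.isdir(os.path.join(base_path, name)):
            return path
        return None
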
diff --git a/pelican/tests/test_settings.py b/pelican/tests/test_settings.py
index 0f630ad5..0e77674d 100644
--- a/pelican/tests/test_settings.py
+++ b/pelican/tests/test_settings.py
@@ -4,10 +4,14 @@ import os
from os.path import abspath, dirname, join
-from pelican.settings import (DEFAULT_CONFIG, DEFAULT_THEME,
- _printf_s_to_format_field,
- configure_settings,
- handle_deprecated_settings, read_settings)
+from pelican.settings import (
+ DEFAULT_CONFIG,
+ DEFAULT_THEME,
+ _printf_s_to_format_field,
+ configure_settings,
+ handle_deprecated_settings,
+ read_settings,
+)
from pelican.tests.support import unittest
@@ -16,40 +20,39 @@ class TestSettingsConfiguration(unittest.TestCase):
append new values to the settings (if any), and apply basic settings
optimizations.
"""
+
def setUp(self):
self.old_locale = locale.setlocale(locale.LC_ALL)
- locale.setlocale(locale.LC_ALL, 'C')
+ locale.setlocale(locale.LC_ALL, "C")
self.PATH = abspath(dirname(__file__))
- default_conf = join(self.PATH, 'default_conf.py')
+ default_conf = join(self.PATH, "default_conf.py")
self.settings = read_settings(default_conf)
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.old_locale)
def test_overwrite_existing_settings(self):
- self.assertEqual(self.settings.get('SITENAME'), "Alexis' log")
- self.assertEqual(
- self.settings.get('SITEURL'),
- 'http://blog.notmyidea.org')
+ self.assertEqual(self.settings.get("SITENAME"), "Alexis' log")
+ self.assertEqual(self.settings.get("SITEURL"), "http://blog.notmyidea.org")
def test_keep_default_settings(self):
# Keep default settings if not defined.
self.assertEqual(
- self.settings.get('DEFAULT_CATEGORY'),
- DEFAULT_CONFIG['DEFAULT_CATEGORY'])
+ self.settings.get("DEFAULT_CATEGORY"), DEFAULT_CONFIG["DEFAULT_CATEGORY"]
+ )
def test_dont_copy_small_keys(self):
# Do not copy keys not in caps.
- self.assertNotIn('foobar', self.settings)
+ self.assertNotIn("foobar", self.settings)
def test_read_empty_settings(self):
# Ensure an empty settings file results in default settings.
settings = read_settings(None)
expected = copy.deepcopy(DEFAULT_CONFIG)
# Added by configure settings
- expected['FEED_DOMAIN'] = ''
- expected['ARTICLE_EXCLUDES'] = ['pages']
- expected['PAGE_EXCLUDES'] = ['']
+ expected["FEED_DOMAIN"] = ""
+ expected["ARTICLE_EXCLUDES"] = ["pages"]
+ expected["PAGE_EXCLUDES"] = [""]
self.maxDiff = None
self.assertDictEqual(settings, expected)
@@ -57,250 +60,265 @@ class TestSettingsConfiguration(unittest.TestCase):
# Make sure that the results from one settings call don't
# affect past or future instances.
self.PATH = abspath(dirname(__file__))
- default_conf = join(self.PATH, 'default_conf.py')
+ default_conf = join(self.PATH, "default_conf.py")
settings = read_settings(default_conf)
- settings['SITEURL'] = 'new-value'
+ settings["SITEURL"] = "new-value"
new_settings = read_settings(default_conf)
- self.assertNotEqual(new_settings['SITEURL'], settings['SITEURL'])
+ self.assertNotEqual(new_settings["SITEURL"], settings["SITEURL"])
def test_defaults_not_overwritten(self):
# This assumes 'SITENAME': 'A Pelican Blog'
settings = read_settings(None)
- settings['SITENAME'] = 'Not a Pelican Blog'
- self.assertNotEqual(settings['SITENAME'], DEFAULT_CONFIG['SITENAME'])
+ settings["SITENAME"] = "Not a Pelican Blog"
+ self.assertNotEqual(settings["SITENAME"], DEFAULT_CONFIG["SITENAME"])
def test_static_path_settings_safety(self):
# Disallow static paths from being strings
settings = {
- 'STATIC_PATHS': 'foo/bar',
- 'THEME_STATIC_PATHS': 'bar/baz',
+ "STATIC_PATHS": "foo/bar",
+ "THEME_STATIC_PATHS": "bar/baz",
# These 4 settings are required to run configure_settings
- 'PATH': '.',
- 'THEME': DEFAULT_THEME,
- 'SITEURL': 'http://blog.notmyidea.org/',
- 'LOCALE': '',
+ "PATH": ".",
+ "THEME": DEFAULT_THEME,
+ "SITEURL": "http://blog.notmyidea.org/",
+ "LOCALE": "",
}
configure_settings(settings)
+ self.assertEqual(settings["STATIC_PATHS"], DEFAULT_CONFIG["STATIC_PATHS"])
self.assertEqual(
- settings['STATIC_PATHS'],
- DEFAULT_CONFIG['STATIC_PATHS'])
- self.assertEqual(
- settings['THEME_STATIC_PATHS'],
- DEFAULT_CONFIG['THEME_STATIC_PATHS'])
+ settings["THEME_STATIC_PATHS"], DEFAULT_CONFIG["THEME_STATIC_PATHS"]
+ )
def test_configure_settings(self):
# Manipulations to settings should be applied correctly.
settings = {
- 'SITEURL': 'http://blog.notmyidea.org/',
- 'LOCALE': '',
- 'PATH': os.curdir,
- 'THEME': DEFAULT_THEME,
+ "SITEURL": "http://blog.notmyidea.org/",
+ "LOCALE": "",
+ "PATH": os.curdir,
+ "THEME": DEFAULT_THEME,
}
configure_settings(settings)
# SITEURL should not have a trailing slash
- self.assertEqual(settings['SITEURL'], 'http://blog.notmyidea.org')
+ self.assertEqual(settings["SITEURL"], "http://blog.notmyidea.org")
# FEED_DOMAIN, if undefined, should default to SITEURL
- self.assertEqual(settings['FEED_DOMAIN'], 'http://blog.notmyidea.org')
+ self.assertEqual(settings["FEED_DOMAIN"], "http://blog.notmyidea.org")
- settings['FEED_DOMAIN'] = 'http://feeds.example.com'
+ settings["FEED_DOMAIN"] = "http://feeds.example.com"
configure_settings(settings)
- self.assertEqual(settings['FEED_DOMAIN'], 'http://feeds.example.com')
+ self.assertEqual(settings["FEED_DOMAIN"], "http://feeds.example.com")
def test_theme_settings_exceptions(self):
settings = self.settings
# Check that theme lookup in "pelican/themes" functions as expected
- settings['THEME'] = os.path.split(settings['THEME'])[1]
+ settings["THEME"] = os.path.split(settings["THEME"])[1]
configure_settings(settings)
- self.assertEqual(settings['THEME'], DEFAULT_THEME)
+ self.assertEqual(settings["THEME"], DEFAULT_THEME)
# Check that non-existent theme raises exception
- settings['THEME'] = 'foo'
+ settings["THEME"] = "foo"
self.assertRaises(Exception, configure_settings, settings)
def test_deprecated_dir_setting(self):
settings = self.settings
- settings['ARTICLE_DIR'] = 'foo'
- settings['PAGE_DIR'] = 'bar'
+ settings["ARTICLE_DIR"] = "foo"
+ settings["PAGE_DIR"] = "bar"
settings = handle_deprecated_settings(settings)
- self.assertEqual(settings['ARTICLE_PATHS'], ['foo'])
- self.assertEqual(settings['PAGE_PATHS'], ['bar'])
+ self.assertEqual(settings["ARTICLE_PATHS"], ["foo"])
+ self.assertEqual(settings["PAGE_PATHS"], ["bar"])
with self.assertRaises(KeyError):
- settings['ARTICLE_DIR']
- settings['PAGE_DIR']
+ settings["ARTICLE_DIR"]
+ settings["PAGE_DIR"]
def test_default_encoding(self):
# Test that the user locale is set if not specified in settings
- locale.setlocale(locale.LC_ALL, 'C')
+ locale.setlocale(locale.LC_ALL, "C")
# empty string = user system locale
- self.assertEqual(self.settings['LOCALE'], [''])
+ self.assertEqual(self.settings["LOCALE"], [""])
configure_settings(self.settings)
lc_time = locale.getlocale(locale.LC_TIME) # should be set to user locale
# explicitly set locale to user pref and test
- locale.setlocale(locale.LC_TIME, '')
+ locale.setlocale(locale.LC_TIME, "")
self.assertEqual(lc_time, locale.getlocale(locale.LC_TIME))
def test_invalid_settings_throw_exception(self):
# Test that the path name is valid
# test that 'PATH' is set
- settings = {
- }
+ settings = {}
self.assertRaises(Exception, configure_settings, settings)
# Test that 'PATH' is valid
- settings['PATH'] = ''
+ settings["PATH"] = ""
self.assertRaises(Exception, configure_settings, settings)
# Test nonexistent THEME
- settings['PATH'] = os.curdir
- settings['THEME'] = 'foo'
+ settings["PATH"] = os.curdir
+ settings["THEME"] = "foo"
self.assertRaises(Exception, configure_settings, settings)
def test__printf_s_to_format_field(self):
- for s in ('%s', '{%s}', '{%s'):
- option = 'foo/{}/bar.baz'.format(s)
- result = _printf_s_to_format_field(option, 'slug')
- expected = option % 'qux'
- found = result.format(slug='qux')
+ for s in ("%s", "{%s}", "{%s"):
+ option = "foo/{}/bar.baz".format(s)
+ result = _printf_s_to_format_field(option, "slug")
+ expected = option % "qux"
+ found = result.format(slug="qux")
self.assertEqual(expected, found)
def test_deprecated_extra_templates_paths(self):
settings = self.settings
- settings['EXTRA_TEMPLATES_PATHS'] = ['/foo/bar', '/ha']
+ settings["EXTRA_TEMPLATES_PATHS"] = ["/foo/bar", "/ha"]
settings = handle_deprecated_settings(settings)
- self.assertEqual(settings['THEME_TEMPLATES_OVERRIDES'],
- ['/foo/bar', '/ha'])
- self.assertNotIn('EXTRA_TEMPLATES_PATHS', settings)
+ self.assertEqual(settings["THEME_TEMPLATES_OVERRIDES"], ["/foo/bar", "/ha"])
+ self.assertNotIn("EXTRA_TEMPLATES_PATHS", settings)
def test_deprecated_paginated_direct_templates(self):
settings = self.settings
- settings['PAGINATED_DIRECT_TEMPLATES'] = ['index', 'archives']
- settings['PAGINATED_TEMPLATES'] = {'index': 10, 'category': None}
+ settings["PAGINATED_DIRECT_TEMPLATES"] = ["index", "archives"]
+ settings["PAGINATED_TEMPLATES"] = {"index": 10, "category": None}
settings = handle_deprecated_settings(settings)
- self.assertEqual(settings['PAGINATED_TEMPLATES'],
- {'index': 10, 'category': None, 'archives': None})
- self.assertNotIn('PAGINATED_DIRECT_TEMPLATES', settings)
+ self.assertEqual(
+ settings["PAGINATED_TEMPLATES"],
+ {"index": 10, "category": None, "archives": None},
+ )
+ self.assertNotIn("PAGINATED_DIRECT_TEMPLATES", settings)
def test_deprecated_paginated_direct_templates_from_file(self):
# This is equivalent to reading a settings file that has
# PAGINATED_DIRECT_TEMPLATES defined but no PAGINATED_TEMPLATES.
- settings = read_settings(None, override={
- 'PAGINATED_DIRECT_TEMPLATES': ['index', 'archives']
- })
- self.assertEqual(settings['PAGINATED_TEMPLATES'], {
- 'archives': None,
- 'author': None,
- 'index': None,
- 'category': None,
- 'tag': None})
- self.assertNotIn('PAGINATED_DIRECT_TEMPLATES', settings)
+ settings = read_settings(
+ None, override={"PAGINATED_DIRECT_TEMPLATES": ["index", "archives"]}
+ )
+ self.assertEqual(
+ settings["PAGINATED_TEMPLATES"],
+ {
+ "archives": None,
+ "author": None,
+ "index": None,
+ "category": None,
+ "tag": None,
+ },
+ )
+ self.assertNotIn("PAGINATED_DIRECT_TEMPLATES", settings)
def test_theme_and_extra_templates_exception(self):
settings = self.settings
- settings['EXTRA_TEMPLATES_PATHS'] = ['/ha']
- settings['THEME_TEMPLATES_OVERRIDES'] = ['/foo/bar']
+ settings["EXTRA_TEMPLATES_PATHS"] = ["/ha"]
+ settings["THEME_TEMPLATES_OVERRIDES"] = ["/foo/bar"]
self.assertRaises(Exception, handle_deprecated_settings, settings)
def test_slug_and_slug_regex_substitutions_exception(self):
settings = {}
- settings['SLUG_REGEX_SUBSTITUTIONS'] = [('C++', 'cpp')]
- settings['TAG_SUBSTITUTIONS'] = [('C#', 'csharp')]
+ settings["SLUG_REGEX_SUBSTITUTIONS"] = [("C++", "cpp")]
+ settings["TAG_SUBSTITUTIONS"] = [("C#", "csharp")]
self.assertRaises(Exception, handle_deprecated_settings, settings)
def test_deprecated_slug_substitutions(self):
- default_slug_regex_subs = self.settings['SLUG_REGEX_SUBSTITUTIONS']
+ default_slug_regex_subs = self.settings["SLUG_REGEX_SUBSTITUTIONS"]
# If no deprecated setting is set, don't set new ones
settings = {}
settings = handle_deprecated_settings(settings)
- self.assertNotIn('SLUG_REGEX_SUBSTITUTIONS', settings)
- self.assertNotIn('TAG_REGEX_SUBSTITUTIONS', settings)
- self.assertNotIn('CATEGORY_REGEX_SUBSTITUTIONS', settings)
- self.assertNotIn('AUTHOR_REGEX_SUBSTITUTIONS', settings)
+ self.assertNotIn("SLUG_REGEX_SUBSTITUTIONS", settings)
+ self.assertNotIn("TAG_REGEX_SUBSTITUTIONS", settings)
+ self.assertNotIn("CATEGORY_REGEX_SUBSTITUTIONS", settings)
+ self.assertNotIn("AUTHOR_REGEX_SUBSTITUTIONS", settings)
# If SLUG_SUBSTITUTIONS is set, set {SLUG, AUTHOR}_REGEX_SUBSTITUTIONS
# correctly, don't set {CATEGORY, TAG}_REGEX_SUBSTITUTIONS
settings = {}
- settings['SLUG_SUBSTITUTIONS'] = [('C++', 'cpp')]
+ settings["SLUG_SUBSTITUTIONS"] = [("C++", "cpp")]
settings = handle_deprecated_settings(settings)
- self.assertEqual(settings.get('SLUG_REGEX_SUBSTITUTIONS'),
- [(r'C\+\+', 'cpp')] + default_slug_regex_subs)
- self.assertNotIn('TAG_REGEX_SUBSTITUTIONS', settings)
- self.assertNotIn('CATEGORY_REGEX_SUBSTITUTIONS', settings)
- self.assertEqual(settings.get('AUTHOR_REGEX_SUBSTITUTIONS'),
- default_slug_regex_subs)
+ self.assertEqual(
+ settings.get("SLUG_REGEX_SUBSTITUTIONS"),
+ [(r"C\+\+", "cpp")] + default_slug_regex_subs,
+ )
+ self.assertNotIn("TAG_REGEX_SUBSTITUTIONS", settings)
+ self.assertNotIn("CATEGORY_REGEX_SUBSTITUTIONS", settings)
+ self.assertEqual(
+ settings.get("AUTHOR_REGEX_SUBSTITUTIONS"), default_slug_regex_subs
+ )
# If {CATEGORY, TAG, AUTHOR}_SUBSTITUTIONS are set, set
# {CATEGORY, TAG, AUTHOR}_REGEX_SUBSTITUTIONS correctly, don't set
# SLUG_REGEX_SUBSTITUTIONS
settings = {}
- settings['TAG_SUBSTITUTIONS'] = [('C#', 'csharp')]
- settings['CATEGORY_SUBSTITUTIONS'] = [('C#', 'csharp')]
- settings['AUTHOR_SUBSTITUTIONS'] = [('Alexander Todorov', 'atodorov')]
+ settings["TAG_SUBSTITUTIONS"] = [("C#", "csharp")]
+ settings["CATEGORY_SUBSTITUTIONS"] = [("C#", "csharp")]
+ settings["AUTHOR_SUBSTITUTIONS"] = [("Alexander Todorov", "atodorov")]
settings = handle_deprecated_settings(settings)
- self.assertNotIn('SLUG_REGEX_SUBSTITUTIONS', settings)
- self.assertEqual(settings['TAG_REGEX_SUBSTITUTIONS'],
- [(r'C\#', 'csharp')] + default_slug_regex_subs)
- self.assertEqual(settings['CATEGORY_REGEX_SUBSTITUTIONS'],
- [(r'C\#', 'csharp')] + default_slug_regex_subs)
- self.assertEqual(settings['AUTHOR_REGEX_SUBSTITUTIONS'],
- [(r'Alexander\ Todorov', 'atodorov')] +
- default_slug_regex_subs)
+ self.assertNotIn("SLUG_REGEX_SUBSTITUTIONS", settings)
+ self.assertEqual(
+ settings["TAG_REGEX_SUBSTITUTIONS"],
+ [(r"C\#", "csharp")] + default_slug_regex_subs,
+ )
+ self.assertEqual(
+ settings["CATEGORY_REGEX_SUBSTITUTIONS"],
+ [(r"C\#", "csharp")] + default_slug_regex_subs,
+ )
+ self.assertEqual(
+ settings["AUTHOR_REGEX_SUBSTITUTIONS"],
+ [(r"Alexander\ Todorov", "atodorov")] + default_slug_regex_subs,
+ )
# If {SLUG, CATEGORY, TAG, AUTHOR}_SUBSTITUTIONS are set, set
# {SLUG, CATEGORY, TAG, AUTHOR}_REGEX_SUBSTITUTIONS correctly
settings = {}
- settings['SLUG_SUBSTITUTIONS'] = [('C++', 'cpp')]
- settings['TAG_SUBSTITUTIONS'] = [('C#', 'csharp')]
- settings['CATEGORY_SUBSTITUTIONS'] = [('C#', 'csharp')]
- settings['AUTHOR_SUBSTITUTIONS'] = [('Alexander Todorov', 'atodorov')]
+ settings["SLUG_SUBSTITUTIONS"] = [("C++", "cpp")]
+ settings["TAG_SUBSTITUTIONS"] = [("C#", "csharp")]
+ settings["CATEGORY_SUBSTITUTIONS"] = [("C#", "csharp")]
+ settings["AUTHOR_SUBSTITUTIONS"] = [("Alexander Todorov", "atodorov")]
settings = handle_deprecated_settings(settings)
- self.assertEqual(settings['TAG_REGEX_SUBSTITUTIONS'],
- [(r'C\+\+', 'cpp')] + [(r'C\#', 'csharp')] +
- default_slug_regex_subs)
- self.assertEqual(settings['CATEGORY_REGEX_SUBSTITUTIONS'],
- [(r'C\+\+', 'cpp')] + [(r'C\#', 'csharp')] +
- default_slug_regex_subs)
- self.assertEqual(settings['AUTHOR_REGEX_SUBSTITUTIONS'],
- [(r'Alexander\ Todorov', 'atodorov')] +
- default_slug_regex_subs)
+ self.assertEqual(
+ settings["TAG_REGEX_SUBSTITUTIONS"],
+ [(r"C\+\+", "cpp")] + [(r"C\#", "csharp")] + default_slug_regex_subs,
+ )
+ self.assertEqual(
+ settings["CATEGORY_REGEX_SUBSTITUTIONS"],
+ [(r"C\+\+", "cpp")] + [(r"C\#", "csharp")] + default_slug_regex_subs,
+ )
+ self.assertEqual(
+ settings["AUTHOR_REGEX_SUBSTITUTIONS"],
+ [(r"Alexander\ Todorov", "atodorov")] + default_slug_regex_subs,
+ )
# Handle old 'skip' flags correctly
settings = {}
- settings['SLUG_SUBSTITUTIONS'] = [('C++', 'cpp', True)]
- settings['AUTHOR_SUBSTITUTIONS'] = [('Alexander Todorov', 'atodorov',
- False)]
+ settings["SLUG_SUBSTITUTIONS"] = [("C++", "cpp", True)]
+ settings["AUTHOR_SUBSTITUTIONS"] = [("Alexander Todorov", "atodorov", False)]
settings = handle_deprecated_settings(settings)
- self.assertEqual(settings.get('SLUG_REGEX_SUBSTITUTIONS'),
- [(r'C\+\+', 'cpp')] +
- [(r'(?u)\A\s*', ''), (r'(?u)\s*\Z', '')])
- self.assertEqual(settings['AUTHOR_REGEX_SUBSTITUTIONS'],
- [(r'Alexander\ Todorov', 'atodorov')] +
- default_slug_regex_subs)
+ self.assertEqual(
+ settings.get("SLUG_REGEX_SUBSTITUTIONS"),
+ [(r"C\+\+", "cpp")] + [(r"(?u)\A\s*", ""), (r"(?u)\s*\Z", "")],
+ )
+ self.assertEqual(
+ settings["AUTHOR_REGEX_SUBSTITUTIONS"],
+ [(r"Alexander\ Todorov", "atodorov")] + default_slug_regex_subs,
+ )
def test_deprecated_slug_substitutions_from_file(self):
# This is equivalent to reading a settings file that has
# SLUG_SUBSTITUTIONS defined but no SLUG_REGEX_SUBSTITUTIONS.
- settings = read_settings(None, override={
- 'SLUG_SUBSTITUTIONS': [('C++', 'cpp')]
- })
- self.assertEqual(settings['SLUG_REGEX_SUBSTITUTIONS'],
- [(r'C\+\+', 'cpp')] +
- self.settings['SLUG_REGEX_SUBSTITUTIONS'])
- self.assertNotIn('SLUG_SUBSTITUTIONS', settings)
+ settings = read_settings(
+ None, override={"SLUG_SUBSTITUTIONS": [("C++", "cpp")]}
+ )
+ self.assertEqual(
+ settings["SLUG_REGEX_SUBSTITUTIONS"],
+ [(r"C\+\+", "cpp")] + self.settings["SLUG_REGEX_SUBSTITUTIONS"],
+ )
+ self.assertNotIn("SLUG_SUBSTITUTIONS", settings)
diff --git a/pelican/tests/test_testsuite.py b/pelican/tests/test_testsuite.py
index fa930139..a9a0c200 100644
--- a/pelican/tests/test_testsuite.py
+++ b/pelican/tests/test_testsuite.py
@@ -4,7 +4,6 @@ from pelican.tests.support import unittest
class TestSuiteTest(unittest.TestCase):
-
def test_error_on_warning(self):
with self.assertRaises(UserWarning):
- warnings.warn('test warning')
+ warnings.warn("test warning")
diff --git a/pelican/tests/test_urlwrappers.py b/pelican/tests/test_urlwrappers.py
index 66ae1524..13632e3a 100644
--- a/pelican/tests/test_urlwrappers.py
+++ b/pelican/tests/test_urlwrappers.py
@@ -5,22 +5,22 @@ from pelican.urlwrappers import Author, Category, Tag, URLWrapper
class TestURLWrapper(unittest.TestCase):
def test_ordering(self):
# URLWrappers are sorted by name
- wrapper_a = URLWrapper(name='first', settings={})
- wrapper_b = URLWrapper(name='last', settings={})
+ wrapper_a = URLWrapper(name="first", settings={})
+ wrapper_b = URLWrapper(name="last", settings={})
self.assertFalse(wrapper_a > wrapper_b)
self.assertFalse(wrapper_a >= wrapper_b)
self.assertFalse(wrapper_a == wrapper_b)
self.assertTrue(wrapper_a != wrapper_b)
self.assertTrue(wrapper_a <= wrapper_b)
self.assertTrue(wrapper_a < wrapper_b)
- wrapper_b.name = 'first'
+ wrapper_b.name = "first"
self.assertFalse(wrapper_a > wrapper_b)
self.assertTrue(wrapper_a >= wrapper_b)
self.assertTrue(wrapper_a == wrapper_b)
self.assertFalse(wrapper_a != wrapper_b)
self.assertTrue(wrapper_a <= wrapper_b)
self.assertFalse(wrapper_a < wrapper_b)
- wrapper_a.name = 'last'
+ wrapper_a.name = "last"
self.assertTrue(wrapper_a > wrapper_b)
self.assertTrue(wrapper_a >= wrapper_b)
self.assertFalse(wrapper_a == wrapper_b)
@@ -29,57 +29,68 @@ class TestURLWrapper(unittest.TestCase):
self.assertFalse(wrapper_a < wrapper_b)
def test_equality(self):
- tag = Tag('test', settings={})
- cat = Category('test', settings={})
- author = Author('test', settings={})
+ tag = Tag("test", settings={})
+ cat = Category("test", settings={})
+ author = Author("test", settings={})
# same name, but different class
self.assertNotEqual(tag, cat)
self.assertNotEqual(tag, author)
# should be equal vs text representing the same name
- self.assertEqual(tag, 'test')
+ self.assertEqual(tag, "test")
# should not be equal vs binary
- self.assertNotEqual(tag, b'test')
+ self.assertNotEqual(tag, b"test")
# Tags describing the same should be equal
- tag_equal = Tag('Test', settings={})
+ tag_equal = Tag("Test", settings={})
self.assertEqual(tag, tag_equal)
# Author describing the same should be equal
- author_equal = Author('Test', settings={})
+ author_equal = Author("Test", settings={})
self.assertEqual(author, author_equal)
- cat_ascii = Category('指導書', settings={})
- self.assertEqual(cat_ascii, 'zhi dao shu')
+ cat_ascii = Category("指導書", settings={})
+ self.assertEqual(cat_ascii, "zhi dao shu")
def test_slugify_with_substitutions_and_dots(self):
- tag = Tag('Tag Dot', settings={'TAG_REGEX_SUBSTITUTIONS': [
- ('Tag Dot', 'tag.dot'),
- ]})
- cat = Category('Category Dot',
- settings={'CATEGORY_REGEX_SUBSTITUTIONS': [
- ('Category Dot', 'cat.dot'),
- ]})
+ tag = Tag(
+ "Tag Dot",
+ settings={
+ "TAG_REGEX_SUBSTITUTIONS": [
+ ("Tag Dot", "tag.dot"),
+ ]
+ },
+ )
+ cat = Category(
+ "Category Dot",
+ settings={
+ "CATEGORY_REGEX_SUBSTITUTIONS": [
+ ("Category Dot", "cat.dot"),
+ ]
+ },
+ )
- self.assertEqual(tag.slug, 'tag.dot')
- self.assertEqual(cat.slug, 'cat.dot')
+ self.assertEqual(tag.slug, "tag.dot")
+ self.assertEqual(cat.slug, "cat.dot")
def test_author_slug_substitutions(self):
- settings = {'AUTHOR_REGEX_SUBSTITUTIONS': [
- ('Alexander Todorov', 'atodorov'),
- ('Krasimir Tsonev', 'krasimir'),
- (r'[^\w\s-]', ''),
- (r'(?u)\A\s*', ''),
- (r'(?u)\s*\Z', ''),
- (r'[-\s]+', '-'),
- ]}
+ settings = {
+ "AUTHOR_REGEX_SUBSTITUTIONS": [
+ ("Alexander Todorov", "atodorov"),
+ ("Krasimir Tsonev", "krasimir"),
+ (r"[^\w\s-]", ""),
+ (r"(?u)\A\s*", ""),
+ (r"(?u)\s*\Z", ""),
+ (r"[-\s]+", "-"),
+ ]
+ }
- author1 = Author('Mr. Senko', settings=settings)
- author2 = Author('Alexander Todorov', settings=settings)
- author3 = Author('Krasimir Tsonev', settings=settings)
+ author1 = Author("Mr. Senko", settings=settings)
+ author2 = Author("Alexander Todorov", settings=settings)
+ author3 = Author("Krasimir Tsonev", settings=settings)
- self.assertEqual(author1.slug, 'mr-senko')
- self.assertEqual(author2.slug, 'atodorov')
- self.assertEqual(author3.slug, 'krasimir')
+ self.assertEqual(author1.slug, "mr-senko")
+ self.assertEqual(author2.slug, "atodorov")
+ self.assertEqual(author3.slug, "krasimir")
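
The ordering in AUTHOR_REGEX_SUBSTITUTIONS above is load-bearing: the name-specific pairs run before the generic cleanup rules, so "Alexander Todorov" is rewritten wholesale and never reaches the hyphenation step. A stripped-down illustration of that pipeline (not Pelican's full slugify):

    import re

    subs = [
        ("Alexander Todorov", "atodorov"),   # specific rewrite first
        (r"[^\w\s-]", ""),                   # then generic cleanup
        (r"(?u)\A\s*", ""),
        (r"(?u)\s*\Z", ""),
        (r"[-\s]+", "-"),
    ]

    def simple_slug(value):
        for pattern, replacement in subs:
            value = re.sub(pattern, replacement, value)
        return value.lower()

    print(simple_slug("Mr. Senko"))          # mr-senko
    print(simple_slug("Alexander Todorov"))  # atodorov
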
diff --git a/pelican/tests/test_utils.py b/pelican/tests/test_utils.py
index 40aff005..22dd8e38 100644
--- a/pelican/tests/test_utils.py
+++ b/pelican/tests/test_utils.py
@@ -14,25 +14,29 @@ except ModuleNotFoundError:
from pelican import utils
from pelican.generators import TemplatePagesGenerator
from pelican.settings import read_settings
-from pelican.tests.support import (LoggedTestCase, get_article,
- locale_available, unittest)
+from pelican.tests.support import (
+ LoggedTestCase,
+ get_article,
+ locale_available,
+ unittest,
+)
from pelican.writers import Writer
class TestUtils(LoggedTestCase):
- _new_attribute = 'new_value'
+ _new_attribute = "new_value"
def setUp(self):
super().setUp()
- self.temp_output = mkdtemp(prefix='pelicantests.')
+ self.temp_output = mkdtemp(prefix="pelicantests.")
def tearDown(self):
super().tearDown()
shutil.rmtree(self.temp_output)
@utils.deprecated_attribute(
- old='_old_attribute', new='_new_attribute',
- since=(3, 1, 0), remove=(4, 1, 3))
+ old="_old_attribute", new="_new_attribute", since=(3, 1, 0), remove=(4, 1, 3)
+ )
def _old_attribute():
return None
@@ -41,69 +45,109 @@ class TestUtils(LoggedTestCase):
self.assertEqual(value, self._new_attribute)
self.assertLogCountEqual(
count=1,
- msg=('_old_attribute has been deprecated since 3.1.0 and will be '
- 'removed by version 4.1.3. Use _new_attribute instead'),
- level=logging.WARNING)
+ msg=(
+ "_old_attribute has been deprecated since 3.1.0 and will be "
+ "removed by version 4.1.3. Use _new_attribute instead"
+ ),
+ level=logging.WARNING,
+ )
def test_get_date(self):
# valid ones
date = utils.SafeDatetime(year=2012, month=11, day=22)
- date_hour = utils.SafeDatetime(
- year=2012, month=11, day=22, hour=22, minute=11)
+ date_hour = utils.SafeDatetime(year=2012, month=11, day=22, hour=22, minute=11)
date_hour_z = utils.SafeDatetime(
- year=2012, month=11, day=22, hour=22, minute=11,
- tzinfo=timezone.utc)
+ year=2012, month=11, day=22, hour=22, minute=11, tzinfo=timezone.utc
+ )
date_hour_est = utils.SafeDatetime(
- year=2012, month=11, day=22, hour=22, minute=11,
- tzinfo=ZoneInfo("EST"))
+ year=2012, month=11, day=22, hour=22, minute=11, tzinfo=ZoneInfo("EST")
+ )
date_hour_sec = utils.SafeDatetime(
- year=2012, month=11, day=22, hour=22, minute=11, second=10)
+ year=2012, month=11, day=22, hour=22, minute=11, second=10
+ )
date_hour_sec_z = utils.SafeDatetime(
- year=2012, month=11, day=22, hour=22, minute=11, second=10,
- tzinfo=timezone.utc)
+ year=2012,
+ month=11,
+ day=22,
+ hour=22,
+ minute=11,
+ second=10,
+ tzinfo=timezone.utc,
+ )
date_hour_sec_est = utils.SafeDatetime(
- year=2012, month=11, day=22, hour=22, minute=11, second=10,
- tzinfo=ZoneInfo("EST"))
+ year=2012,
+ month=11,
+ day=22,
+ hour=22,
+ minute=11,
+ second=10,
+ tzinfo=ZoneInfo("EST"),
+ )
date_hour_sec_frac_z = utils.SafeDatetime(
- year=2012, month=11, day=22, hour=22, minute=11, second=10,
- microsecond=123000, tzinfo=timezone.utc)
+ year=2012,
+ month=11,
+ day=22,
+ hour=22,
+ minute=11,
+ second=10,
+ microsecond=123000,
+ tzinfo=timezone.utc,
+ )
dates = {
- '2012-11-22': date,
- '2012/11/22': date,
- '2012-11-22 22:11': date_hour,
- '2012/11/22 22:11': date_hour,
- '22-11-2012': date,
- '22/11/2012': date,
- '22.11.2012': date,
- '22.11.2012 22:11': date_hour,
- '2012-11-22T22:11Z': date_hour_z,
- '2012-11-22T22:11-0500': date_hour_est,
- '2012-11-22 22:11:10': date_hour_sec,
- '2012-11-22T22:11:10Z': date_hour_sec_z,
- '2012-11-22T22:11:10-0500': date_hour_sec_est,
- '2012-11-22T22:11:10.123Z': date_hour_sec_frac_z,
+ "2012-11-22": date,
+ "2012/11/22": date,
+ "2012-11-22 22:11": date_hour,
+ "2012/11/22 22:11": date_hour,
+ "22-11-2012": date,
+ "22/11/2012": date,
+ "22.11.2012": date,
+ "22.11.2012 22:11": date_hour,
+ "2012-11-22T22:11Z": date_hour_z,
+ "2012-11-22T22:11-0500": date_hour_est,
+ "2012-11-22 22:11:10": date_hour_sec,
+ "2012-11-22T22:11:10Z": date_hour_sec_z,
+ "2012-11-22T22:11:10-0500": date_hour_sec_est,
+ "2012-11-22T22:11:10.123Z": date_hour_sec_frac_z,
}
# examples from http://www.w3.org/TR/NOTE-datetime
iso_8601_date = utils.SafeDatetime(year=1997, month=7, day=16)
iso_8601_date_hour_tz = utils.SafeDatetime(
- year=1997, month=7, day=16, hour=19, minute=20,
- tzinfo=ZoneInfo("Europe/London"))
+ year=1997,
+ month=7,
+ day=16,
+ hour=19,
+ minute=20,
+ tzinfo=ZoneInfo("Europe/London"),
+ )
iso_8601_date_hour_sec_tz = utils.SafeDatetime(
- year=1997, month=7, day=16, hour=19, minute=20, second=30,
- tzinfo=ZoneInfo("Europe/London"))
+ year=1997,
+ month=7,
+ day=16,
+ hour=19,
+ minute=20,
+ second=30,
+ tzinfo=ZoneInfo("Europe/London"),
+ )
iso_8601_date_hour_sec_ms_tz = utils.SafeDatetime(
- year=1997, month=7, day=16, hour=19, minute=20, second=30,
- microsecond=450000, tzinfo=ZoneInfo("Europe/London"))
+ year=1997,
+ month=7,
+ day=16,
+ hour=19,
+ minute=20,
+ second=30,
+ microsecond=450000,
+ tzinfo=ZoneInfo("Europe/London"),
+ )
iso_8601 = {
- '1997-07-16': iso_8601_date,
- '1997-07-16T19:20+01:00': iso_8601_date_hour_tz,
- '1997-07-16T19:20:30+01:00': iso_8601_date_hour_sec_tz,
- '1997-07-16T19:20:30.45+01:00': iso_8601_date_hour_sec_ms_tz,
+ "1997-07-16": iso_8601_date,
+ "1997-07-16T19:20+01:00": iso_8601_date_hour_tz,
+ "1997-07-16T19:20:30+01:00": iso_8601_date_hour_sec_tz,
+ "1997-07-16T19:20:30.45+01:00": iso_8601_date_hour_sec_ms_tz,
}
# invalid ones
- invalid_dates = ['2010-110-12', 'yay']
+ invalid_dates = ["2010-110-12", "yay"]
for value, expected in dates.items():
self.assertEqual(utils.get_date(value), expected, value)
@@ -115,219 +159,247 @@ class TestUtils(LoggedTestCase):
self.assertRaises(ValueError, utils.get_date, item)
def test_slugify(self):
-
- samples = (('this is a test', 'this-is-a-test'),
- ('this is a test', 'this-is-a-test'),
- ('this → is ← a ↑ test', 'this-is-a-test'),
- ('this--is---a test', 'this-is-a-test'),
- ('unicode測試許功蓋,你看到了嗎?',
- 'unicodece-shi-xu-gong-gai-ni-kan-dao-liao-ma'),
- ('大飯原発4号機、18日夜起動へ',
- 'da-fan-yuan-fa-4hao-ji-18ri-ye-qi-dong-he'),)
+ samples = (
+ ("this is a test", "this-is-a-test"),
+ ("this is a test", "this-is-a-test"),
+ ("this → is ← a ↑ test", "this-is-a-test"),
+ ("this--is---a test", "this-is-a-test"),
+ (
+ "unicode測試許功蓋,你看到了嗎?",
+ "unicodece-shi-xu-gong-gai-ni-kan-dao-liao-ma",
+ ),
+ (
+ "大飯原発4号機、18日夜起動へ",
+ "da-fan-yuan-fa-4hao-ji-18ri-ye-qi-dong-he",
+ ),
+ )
settings = read_settings()
- subs = settings['SLUG_REGEX_SUBSTITUTIONS']
+ subs = settings["SLUG_REGEX_SUBSTITUTIONS"]
for value, expected in samples:
self.assertEqual(utils.slugify(value, regex_subs=subs), expected)
- self.assertEqual(utils.slugify('Cat', regex_subs=subs), 'cat')
+ self.assertEqual(utils.slugify("Cat", regex_subs=subs), "cat")
self.assertEqual(
- utils.slugify('Cat', regex_subs=subs, preserve_case=False), 'cat')
+ utils.slugify("Cat", regex_subs=subs, preserve_case=False), "cat"
+ )
self.assertEqual(
- utils.slugify('Cat', regex_subs=subs, preserve_case=True), 'Cat')
+ utils.slugify("Cat", regex_subs=subs, preserve_case=True), "Cat"
+ )
def test_slugify_use_unicode(self):
-
samples = (
- ('this is a test', 'this-is-a-test'),
- ('this is a test', 'this-is-a-test'),
- ('this → is ← a ↑ test', 'this-is-a-test'),
- ('this--is---a test', 'this-is-a-test'),
- ('unicode測試許功蓋,你看到了嗎?', 'unicode測試許功蓋你看到了嗎'),
- ('Çığ', 'çığ')
+ ("this is a test", "this-is-a-test"),
+ ("this is a test", "this-is-a-test"),
+ ("this → is ← a ↑ test", "this-is-a-test"),
+ ("this--is---a test", "this-is-a-test"),
+ ("unicode測試許功蓋,你看到了嗎?", "unicode測試許功蓋你看到了嗎"),
+ ("Çığ", "çığ"),
)
settings = read_settings()
- subs = settings['SLUG_REGEX_SUBSTITUTIONS']
+ subs = settings["SLUG_REGEX_SUBSTITUTIONS"]
for value, expected in samples:
self.assertEqual(
- utils.slugify(value, regex_subs=subs, use_unicode=True),
- expected)
+ utils.slugify(value, regex_subs=subs, use_unicode=True), expected
+ )
# check with preserve case
for value, expected in samples:
self.assertEqual(
- utils.slugify('Çığ', regex_subs=subs,
- preserve_case=True, use_unicode=True),
- 'Çığ')
+ utils.slugify(
+ "Çığ", regex_subs=subs, preserve_case=True, use_unicode=True
+ ),
+ "Çığ",
+ )
# check normalization
samples = (
- ('大飯原発4号機、18日夜起動へ', '大飯原発4号機18日夜起動へ'),
+ ("大飯原発4号機、18日夜起動へ", "大飯原発4号機18日夜起動へ"),
(
- '\N{LATIN SMALL LETTER C}\N{COMBINING CEDILLA}',
- '\N{LATIN SMALL LETTER C WITH CEDILLA}'
- )
+ "\N{LATIN SMALL LETTER C}\N{COMBINING CEDILLA}",
+ "\N{LATIN SMALL LETTER C WITH CEDILLA}",
+ ),
)
for value, expected in samples:
self.assertEqual(
- utils.slugify(value, regex_subs=subs, use_unicode=True),
- expected)
+ utils.slugify(value, regex_subs=subs, use_unicode=True), expected
+ )
def test_slugify_substitute(self):
-
- samples = (('C++ is based on C', 'cpp-is-based-on-c'),
- ('C+++ test C+ test', 'cpp-test-c-test'),
- ('c++, c#, C#, C++', 'cpp-c-sharp-c-sharp-cpp'),
- ('c++-streams', 'cpp-streams'),)
+ samples = (
+ ("C++ is based on C", "cpp-is-based-on-c"),
+ ("C+++ test C+ test", "cpp-test-c-test"),
+ ("c++, c#, C#, C++", "cpp-c-sharp-c-sharp-cpp"),
+ ("c++-streams", "cpp-streams"),
+ )
settings = read_settings()
subs = [
- (r'C\+\+', 'CPP'),
- (r'C#', 'C-SHARP'),
- ] + settings['SLUG_REGEX_SUBSTITUTIONS']
+ (r"C\+\+", "CPP"),
+ (r"C#", "C-SHARP"),
+ ] + settings["SLUG_REGEX_SUBSTITUTIONS"]
for value, expected in samples:
self.assertEqual(utils.slugify(value, regex_subs=subs), expected)
def test_slugify_substitute_and_keeping_non_alphanum(self):
-
- samples = (('Fedora QA', 'fedora.qa'),
- ('C++ is used by Fedora QA', 'cpp is used by fedora.qa'),
- ('C++ is based on C', 'cpp is based on c'),
- ('C+++ test C+ test', 'cpp+ test c+ test'),)
+ samples = (
+ ("Fedora QA", "fedora.qa"),
+ ("C++ is used by Fedora QA", "cpp is used by fedora.qa"),
+ ("C++ is based on C", "cpp is based on c"),
+ ("C+++ test C+ test", "cpp+ test c+ test"),
+ )
subs = [
- (r'Fedora QA', 'fedora.qa'),
- (r'c\+\+', 'cpp'),
+ (r"Fedora QA", "fedora.qa"),
+ (r"c\+\+", "cpp"),
]
for value, expected in samples:
self.assertEqual(utils.slugify(value, regex_subs=subs), expected)
def test_get_relative_path(self):
-
- samples = ((os.path.join('test', 'test.html'), os.pardir),
- (os.path.join('test', 'test', 'test.html'),
- os.path.join(os.pardir, os.pardir)),
- ('test.html', os.curdir),
- (os.path.join('/test', 'test.html'), os.pardir),
- (os.path.join('/test', 'test', 'test.html'),
- os.path.join(os.pardir, os.pardir)),
- ('/test.html', os.curdir),)
+ samples = (
+ (os.path.join("test", "test.html"), os.pardir),
+ (
+ os.path.join("test", "test", "test.html"),
+ os.path.join(os.pardir, os.pardir),
+ ),
+ ("test.html", os.curdir),
+ (os.path.join("/test", "test.html"), os.pardir),
+ (
+ os.path.join("/test", "test", "test.html"),
+ os.path.join(os.pardir, os.pardir),
+ ),
+ ("/test.html", os.curdir),
+ )
for value, expected in samples:
self.assertEqual(utils.get_relative_path(value), expected)
def test_truncate_html_words(self):
# Plain text.
+ self.assertEqual(utils.truncate_html_words("short string", 20), "short string")
self.assertEqual(
- utils.truncate_html_words('short string', 20),
- 'short string')
- self.assertEqual(
- utils.truncate_html_words('word ' * 100, 20),
- 'word ' * 20 + '…')
+ utils.truncate_html_words("word " * 100, 20), "word " * 20 + "…"
+ )
# Plain text with Unicode content.
self.assertEqual(
utils.truncate_html_words(
- '我愿意这样,朋友——我独自远行,不但没有你,\
- 并且再没有别的影在黑暗里。', 12
+ "我愿意这样,朋友——我独自远行,不但没有你,\
+ 并且再没有别的影在黑暗里。",
+ 12,
),
- '我愿意这样,朋友——我独自远行' + ' …')
+ "我愿意这样,朋友——我独自远行" + " …",
+ )
self.assertEqual(
utils.truncate_html_words(
- 'Ты мелькнула, ты предстала, Снова сердце задрожало,', 3
+ "Ты мелькнула, ты предстала, Снова сердце задрожало,", 3
),
- 'Ты мелькнула, ты' + ' …')
+ "Ты мелькнула, ты" + " …",
+ )
self.assertEqual(
- utils.truncate_html_words(
- 'Trong đầm gì đẹp bằng sen', 4
- ),
- 'Trong đầm gì đẹp' + ' …')
+ utils.truncate_html_words("Trong đầm gì đẹp bằng sen", 4),
+ "Trong đầm gì đẹp" + " …",
+ )
# Words enclosed or intervaled by HTML tags.
self.assertEqual(
- utils.truncate_html_words('<p>' + 'word ' * 100 + '</p>', 20),
- '<p>' + 'word ' * 20 + '…</p>')
+ utils.truncate_html_words("<p>" + "word " * 100 + "</p>", 20),
+ "<p>" + "word " * 20 + "…</p>",
+ )
self.assertEqual(
utils.truncate_html_words(
- '<span\nstyle="width: 100%">' + 'word ' * 100 + '</span>', 20),
- '<span\nstyle="width: 100%">' + 'word ' * 20 + '…</span>')
+ '<span\nstyle="width: 100%">' + "word " * 100 + "</span>", 20
+ ),
+ '<span\nstyle="width: 100%">' + "word " * 20 + "…</span>",
+ )
self.assertEqual(
- utils.truncate_html_words('<br>' + 'word ' * 100, 20),
- '<br>' + 'word ' * 20 + '…')
+ utils.truncate_html_words("<br>" + "word " * 100, 20),
+ "<br>" + "word " * 20 + "…",
+ )
self.assertEqual(
- utils.truncate_html_words('<!-- comment -->' + 'word ' * 100, 20),
- '<!-- comment -->' + 'word ' * 20 + '…')
+ utils.truncate_html_words("<!-- comment -->" + "word " * 100, 20),
+ "<!-- comment -->" + "word " * 20 + "…",
+ )
# Words enclosed or intervaled by HTML tags with a custom end
# marker containing HTML tags.
self.assertEqual(
- utils.truncate_html_words('<p>' + 'word ' * 100 + '</p>', 20,
- '<span>marker</span>'),
- '<p>' + 'word ' * 20 + '<span>marker</span></p>')
+ utils.truncate_html_words(
+ "<p>" + "word " * 100 + "</p>", 20, "<span>marker</span>"
+ ),
+ "<p>" + "word " * 20 + "<span>marker</span></p>",
+ )
self.assertEqual(
utils.truncate_html_words(
- '<span\nstyle="width: 100%">' + 'word ' * 100 + '</span>', 20,
- '<span>marker</span>'),
- '<span\nstyle="width: 100%">' + 'word ' * 20 + '<span>marker</span></span>')
+ '<span\nstyle="width: 100%">' + "word " * 100 + "</span>",
+ 20,
+ "<span>marker</span>",
+ ),
+ '<span\nstyle="width: 100%">' + "word " * 20 + "<span>marker</span></span>",
+ )
self.assertEqual(
- utils.truncate_html_words('<br>' + 'word ' * 100, 20,
- '<span>marker</span>'),
- '<br>' + 'word ' * 20 + '<span>marker</span>')
+ utils.truncate_html_words(
+ "<br>" + "word " * 100, 20, "<span>marker</span>"
+ ),
+ "<br>" + "word " * 20 + "<span>marker</span>",
+ )
self.assertEqual(
- utils.truncate_html_words('<!-- comment -->' + 'word ' * 100, 20,
- '<span>marker</span>'),
- '<!-- comment -->' + 'word ' * 20 + '<span>marker</span>')
+ utils.truncate_html_words(
+ "<!-- comment -->" + "word " * 100, 20, "<span>marker</span>"
+ ),
+ "<!-- comment -->" + "word " * 20 + "<span>marker</span>",
+ )
# Words with hyphens and apostrophes.
+ self.assertEqual(utils.truncate_html_words("a-b " * 100, 20), "a-b " * 20 + "…")
self.assertEqual(
- utils.truncate_html_words("a-b " * 100, 20),
- "a-b " * 20 + '…')
- self.assertEqual(
- utils.truncate_html_words("it's " * 100, 20),
- "it's " * 20 + '…')
+ utils.truncate_html_words("it's " * 100, 20), "it's " * 20 + "…"
+ )
# Words with HTML entity references.
self.assertEqual(
- utils.truncate_html_words("é " * 100, 20),
- "é " * 20 + '…')
+ utils.truncate_html_words("é " * 100, 20), "é " * 20 + "…"
+ )
self.assertEqual(
utils.truncate_html_words("café " * 100, 20),
- "café " * 20 + '…')
+ "café " * 20 + "…",
+ )
self.assertEqual(
utils.truncate_html_words("èlite " * 100, 20),
- "èlite " * 20 + '…')
+ "èlite " * 20 + "…",
+ )
self.assertEqual(
utils.truncate_html_words("cafetiére " * 100, 20),
- "cafetiére " * 20 + '…')
+ "cafetiére " * 20 + "…",
+ )
self.assertEqual(
- utils.truncate_html_words("∫dx " * 100, 20),
- "∫dx " * 20 + '…')
+ utils.truncate_html_words("∫dx " * 100, 20), "∫dx " * 20 + "…"
+ )
# Words with HTML character references inside and outside
# the ASCII range.
self.assertEqual(
- utils.truncate_html_words("é " * 100, 20),
- "é " * 20 + '…')
+ utils.truncate_html_words("é " * 100, 20), "é " * 20 + "…"
+ )
self.assertEqual(
- utils.truncate_html_words("∫dx " * 100, 20),
- "∫dx " * 20 + '…')
+ utils.truncate_html_words("∫dx " * 100, 20), "∫dx " * 20 + "…"
+ )
# Words with invalid or broken HTML references.
+ self.assertEqual(utils.truncate_html_words("&invalid;", 20), "&invalid;")
self.assertEqual(
- utils.truncate_html_words('&invalid;', 20), '&invalid;')
+ utils.truncate_html_words("", 20), ""
+ )
self.assertEqual(
- utils.truncate_html_words('', 20), '')
- self.assertEqual(
- utils.truncate_html_words('', 20), '')
- self.assertEqual(
- utils.truncate_html_words('&mdash text', 20), '&mdash text')
- self.assertEqual(
- utils.truncate_html_words('&#1234 text', 20), '&#1234 text')
- self.assertEqual(
- utils.truncate_html_words('&#xabc text', 20), '&#xabc text')
+ utils.truncate_html_words("&#xfffffffff;", 20), "&#xfffffffff;"
+ )
+ self.assertEqual(utils.truncate_html_words("&mdash text", 20), "&mdash text")
+ self.assertEqual(utils.truncate_html_words("&#1234 text", 20), "&#1234 text")
+ self.assertEqual(utils.truncate_html_words("&#xabc text", 20), "&#xabc text")
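
What the entity and character-reference assertions above are really testing is tokenisation: a reference such as &eacute; or &#233; must count as part of the word it sits in, so "caf&eacute;" is one word, not several tokens. A rough model of such a word pattern; this regex is illustrative only, truncate_html_words has its own:

    import re

    # One "word" may mix word characters, hyphens, apostrophes and
    # HTML references such as &eacute; or &#8747; (semicolon optional).
    WORD_RE = re.compile(r"(?:&#?[0-9a-zA-Z]+;?|[\w'-])+")

    print(len(WORD_RE.findall("caf&eacute; " * 5)))   # 5
    print(WORD_RE.findall("it's a-b &#8747;dx"))      # ["it's", 'a-b', '&#8747;dx']
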
def test_process_translations(self):
fr_articles = []
@@ -335,65 +407,135 @@ class TestUtils(LoggedTestCase):
# create a bunch of articles
# 0: no translation metadata
- fr_articles.append(get_article(lang='fr', slug='yay0', title='Titre',
- content='en français'))
- en_articles.append(get_article(lang='en', slug='yay0', title='Title',
- content='in english'))
+ fr_articles.append(
+ get_article(lang="fr", slug="yay0", title="Titre", content="en français")
+ )
+ en_articles.append(
+ get_article(lang="en", slug="yay0", title="Title", content="in english")
+ )
# 1: translation metadata on default lang
- fr_articles.append(get_article(lang='fr', slug='yay1', title='Titre',
- content='en français'))
- en_articles.append(get_article(lang='en', slug='yay1', title='Title',
- content='in english',
- translation='true'))
+ fr_articles.append(
+ get_article(lang="fr", slug="yay1", title="Titre", content="en français")
+ )
+ en_articles.append(
+ get_article(
+ lang="en",
+ slug="yay1",
+ title="Title",
+ content="in english",
+ translation="true",
+ )
+ )
# 2: translation metadata not on default lang
- fr_articles.append(get_article(lang='fr', slug='yay2', title='Titre',
- content='en français',
- translation='true'))
- en_articles.append(get_article(lang='en', slug='yay2', title='Title',
- content='in english'))
+ fr_articles.append(
+ get_article(
+ lang="fr",
+ slug="yay2",
+ title="Titre",
+ content="en français",
+ translation="true",
+ )
+ )
+ en_articles.append(
+ get_article(lang="en", slug="yay2", title="Title", content="in english")
+ )
# 3: back to default language detection if all items have the
# translation metadata
- fr_articles.append(get_article(lang='fr', slug='yay3', title='Titre',
- content='en français',
- translation='yep'))
- en_articles.append(get_article(lang='en', slug='yay3', title='Title',
- content='in english',
- translation='yes'))
+ fr_articles.append(
+ get_article(
+ lang="fr",
+ slug="yay3",
+ title="Titre",
+ content="en français",
+ translation="yep",
+ )
+ )
+ en_articles.append(
+ get_article(
+ lang="en",
+ slug="yay3",
+ title="Title",
+ content="in english",
+ translation="yes",
+ )
+ )
# 4-5: translation pairs with the same slug but different category
- fr_articles.append(get_article(lang='fr', slug='yay4', title='Titre',
- content='en français', category='foo'))
- en_articles.append(get_article(lang='en', slug='yay4', title='Title',
- content='in english', category='foo'))
- fr_articles.append(get_article(lang='fr', slug='yay4', title='Titre',
- content='en français', category='bar'))
- en_articles.append(get_article(lang='en', slug='yay4', title='Title',
- content='in english', category='bar'))
+ fr_articles.append(
+ get_article(
+ lang="fr",
+ slug="yay4",
+ title="Titre",
+ content="en français",
+ category="foo",
+ )
+ )
+ en_articles.append(
+ get_article(
+ lang="en",
+ slug="yay4",
+ title="Title",
+ content="in english",
+ category="foo",
+ )
+ )
+ fr_articles.append(
+ get_article(
+ lang="fr",
+ slug="yay4",
+ title="Titre",
+ content="en français",
+ category="bar",
+ )
+ )
+ en_articles.append(
+ get_article(
+ lang="en",
+ slug="yay4",
+ title="Title",
+ content="in english",
+ category="bar",
+ )
+ )
# try adding articles in both orders
- for lang0_articles, lang1_articles in ((fr_articles, en_articles),
- (en_articles, fr_articles)):
+ for lang0_articles, lang1_articles in (
+ (fr_articles, en_articles),
+ (en_articles, fr_articles),
+ ):
articles = lang0_articles + lang1_articles
# test process_translations with falsy translation_id
- index, trans = utils.process_translations(
- articles, translation_id=None)
+ index, trans = utils.process_translations(articles, translation_id=None)
for i in range(6):
for lang_articles in [en_articles, fr_articles]:
self.assertIn(lang_articles[i], index)
self.assertNotIn(lang_articles[i], trans)
# test process_translations with simple and complex translation_id
- for translation_id in ['slug', {'slug', 'category'}]:
+ for translation_id in ["slug", {"slug", "category"}]:
index, trans = utils.process_translations(
- articles, translation_id=translation_id)
+ articles, translation_id=translation_id
+ )
- for a in [en_articles[0], fr_articles[1], en_articles[2],
- en_articles[3], en_articles[4], en_articles[5]]:
+ for a in [
+ en_articles[0],
+ fr_articles[1],
+ en_articles[2],
+ en_articles[3],
+ en_articles[4],
+ en_articles[5],
+ ]:
self.assertIn(a, index)
self.assertNotIn(a, trans)
- for a in [fr_articles[0], en_articles[1], fr_articles[2],
- fr_articles[3], fr_articles[4], fr_articles[5]]:
+ for a in [
+ fr_articles[0],
+ en_articles[1],
+ fr_articles[2],
+ fr_articles[3],
+ fr_articles[4],
+ fr_articles[5],
+ ]:
self.assertIn(a, trans)
self.assertNotIn(a, index)
@@ -403,18 +545,17 @@ class TestUtils(LoggedTestCase):
for a_arts in [en_articles, fr_articles]:
for b_arts in [en_articles, fr_articles]:
- if translation_id == 'slug':
+ if translation_id == "slug":
self.assertIn(a_arts[4], b_arts[5].translations)
self.assertIn(a_arts[5], b_arts[4].translations)
- elif translation_id == {'slug', 'category'}:
+ elif translation_id == {"slug", "category"}:
self.assertNotIn(a_arts[4], b_arts[5].translations)
self.assertNotIn(a_arts[5], b_arts[4].translations)
def test_clean_output_dir(self):
retention = ()
- test_directory = os.path.join(self.temp_output,
- 'clean_output')
- content = os.path.join(os.path.dirname(__file__), 'content')
+ test_directory = os.path.join(self.temp_output, "clean_output")
+ content = os.path.join(os.path.dirname(__file__), "content")
shutil.copytree(content, test_directory)
utils.clean_output_dir(test_directory, retention)
self.assertTrue(os.path.isdir(test_directory))
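
Taken together, the three clean_output_dir tests here pin down its edge cases: an existing directory is emptied but kept, a missing path is a no-op, and a stray file at the target path is removed. A compact sketch of behaviour satisfying those assertions (assumed, not the utils implementation):

    import os
    import shutil

    def clean_output_dir(path, retention):
        if not os.path.exists(path):
            return                      # nothing to do
        if not os.path.isdir(path):
            os.remove(path)             # a file where the dir should be
            return
        for name in os.listdir(path):   # empty the dir, honouring retention
            if name in retention:
                continue
            full = os.path.join(path, name)
            if os.path.isdir(full):
                shutil.rmtree(full)
            else:
                os.remove(full)
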
@@ -423,17 +564,15 @@ class TestUtils(LoggedTestCase):
def test_clean_output_dir_not_there(self):
retention = ()
- test_directory = os.path.join(self.temp_output,
- 'does_not_exist')
+ test_directory = os.path.join(self.temp_output, "does_not_exist")
utils.clean_output_dir(test_directory, retention)
self.assertFalse(os.path.exists(test_directory))
def test_clean_output_dir_is_file(self):
retention = ()
- test_directory = os.path.join(self.temp_output,
- 'this_is_a_file')
- f = open(test_directory, 'w')
- f.write('')
+ test_directory = os.path.join(self.temp_output, "this_is_a_file")
+ f = open(test_directory, "w")
+ f.write("")
f.close()
utils.clean_output_dir(test_directory, retention)
self.assertFalse(os.path.exists(test_directory))
@@ -442,223 +581,230 @@ class TestUtils(LoggedTestCase):
d = utils.SafeDatetime(2012, 8, 29)
# simple formatting
- self.assertEqual(utils.strftime(d, '%d/%m/%y'), '29/08/12')
- self.assertEqual(utils.strftime(d, '%d/%m/%Y'), '29/08/2012')
+ self.assertEqual(utils.strftime(d, "%d/%m/%y"), "29/08/12")
+ self.assertEqual(utils.strftime(d, "%d/%m/%Y"), "29/08/2012")
# RFC 3339
self.assertEqual(
- utils.strftime(d, '%Y-%m-%dT%H:%M:%SZ'),
- '2012-08-29T00:00:00Z')
+ utils.strftime(d, "%Y-%m-%dT%H:%M:%SZ"), "2012-08-29T00:00:00Z"
+ )
# % escaped
- self.assertEqual(utils.strftime(d, '%d%%%m%%%y'), '29%08%12')
- self.assertEqual(utils.strftime(d, '%d %% %m %% %y'), '29 % 08 % 12')
+ self.assertEqual(utils.strftime(d, "%d%%%m%%%y"), "29%08%12")
+ self.assertEqual(utils.strftime(d, "%d %% %m %% %y"), "29 % 08 % 12")
# not valid % formatter
- self.assertEqual(utils.strftime(d, '10% reduction in %Y'),
- '10% reduction in 2012')
- self.assertEqual(utils.strftime(d, '%10 reduction in %Y'),
- '%10 reduction in 2012')
+ self.assertEqual(
+ utils.strftime(d, "10% reduction in %Y"), "10% reduction in 2012"
+ )
+ self.assertEqual(
+ utils.strftime(d, "%10 reduction in %Y"), "%10 reduction in 2012"
+ )
# with text
- self.assertEqual(utils.strftime(d, 'Published in %d-%m-%Y'),
- 'Published in 29-08-2012')
+ self.assertEqual(
+ utils.strftime(d, "Published in %d-%m-%Y"), "Published in 29-08-2012"
+ )
# with non-ascii text
self.assertEqual(
- utils.strftime(d, '%d/%m/%Y Øl trinken beim Besäufnis'),
- '29/08/2012 Øl trinken beim Besäufnis')
+ utils.strftime(d, "%d/%m/%Y Øl trinken beim Besäufnis"),
+ "29/08/2012 Øl trinken beim Besäufnis",
+ )
# alternative formatting options
- self.assertEqual(utils.strftime(d, '%-d/%-m/%y'), '29/8/12')
- self.assertEqual(utils.strftime(d, '%-H:%-M:%-S'), '0:0:0')
+ self.assertEqual(utils.strftime(d, "%-d/%-m/%y"), "29/8/12")
+ self.assertEqual(utils.strftime(d, "%-H:%-M:%-S"), "0:0:0")
d = utils.SafeDatetime(2012, 8, 9)
- self.assertEqual(utils.strftime(d, '%-d/%-m/%y'), '9/8/12')
+ self.assertEqual(utils.strftime(d, "%-d/%-m/%y"), "9/8/12")
d = utils.SafeDatetime(2021, 1, 8)
- self.assertEqual(utils.strftime(d, '%G - %-V - %u'), '2021 - 1 - 5')
+ self.assertEqual(utils.strftime(d, "%G - %-V - %u"), "2021 - 1 - 5")
# test the output of utils.strftime in a different locale
# Turkish locale
- @unittest.skipUnless(locale_available('tr_TR.UTF-8') or
- locale_available('Turkish'),
- 'Turkish locale needed')
+ @unittest.skipUnless(
+ locale_available("tr_TR.UTF-8") or locale_available("Turkish"),
+ "Turkish locale needed",
+ )
def test_strftime_locale_dependent_turkish(self):
- temp_locale = 'Turkish' if platform == 'win32' else 'tr_TR.UTF-8'
+ temp_locale = "Turkish" if platform == "win32" else "tr_TR.UTF-8"
with utils.temporary_locale(temp_locale):
d = utils.SafeDatetime(2012, 8, 29)
# simple
- self.assertEqual(utils.strftime(d, '%d %B %Y'), '29 Ağustos 2012')
- self.assertEqual(utils.strftime(d, '%A, %d %B %Y'),
- 'Çarşamba, 29 Ağustos 2012')
+ self.assertEqual(utils.strftime(d, "%d %B %Y"), "29 Ağustos 2012")
+ self.assertEqual(
+ utils.strftime(d, "%A, %d %B %Y"), "Çarşamba, 29 Ağustos 2012"
+ )
# with text
self.assertEqual(
- utils.strftime(d, 'Yayınlanma tarihi: %A, %d %B %Y'),
- 'Yayınlanma tarihi: Çarşamba, 29 Ağustos 2012')
+ utils.strftime(d, "Yayınlanma tarihi: %A, %d %B %Y"),
+ "Yayınlanma tarihi: Çarşamba, 29 Ağustos 2012",
+ )
# non-ascii format candidate (someone might pass it… for some reason)
self.assertEqual(
- utils.strftime(d, '%Y yılında %üretim artışı'),
- '2012 yılında %üretim artışı')
+ utils.strftime(d, "%Y yılında %üretim artışı"),
+ "2012 yılında %üretim artışı",
+ )
# test the output of utils.strftime in a different locale
# French locale
- @unittest.skipUnless(locale_available('fr_FR.UTF-8') or
- locale_available('French'),
- 'French locale needed')
+ @unittest.skipUnless(
+ locale_available("fr_FR.UTF-8") or locale_available("French"),
+ "French locale needed",
+ )
def test_strftime_locale_dependent_french(self):
- temp_locale = 'French' if platform == 'win32' else 'fr_FR.UTF-8'
+ temp_locale = "French" if platform == "win32" else "fr_FR.UTF-8"
with utils.temporary_locale(temp_locale):
d = utils.SafeDatetime(2012, 8, 29)
# simple
- self.assertEqual(utils.strftime(d, '%d %B %Y'), '29 août 2012')
+ self.assertEqual(utils.strftime(d, "%d %B %Y"), "29 août 2012")
# depending on OS, the first letter is m or M
- self.assertTrue(utils.strftime(d, '%A') in ('mercredi', 'Mercredi'))
+ self.assertTrue(utils.strftime(d, "%A") in ("mercredi", "Mercredi"))
# with text
self.assertEqual(
- utils.strftime(d, 'Écrit le %d %B %Y'),
- 'Écrit le 29 août 2012')
+ utils.strftime(d, "Écrit le %d %B %Y"), "Écrit le 29 août 2012"
+ )
# non-ascii format candidate (someone might pass it… for some reason)
- self.assertEqual(
- utils.strftime(d, '%écrits en %Y'),
- '%écrits en 2012')
+ self.assertEqual(utils.strftime(d, "%écrits en %Y"), "%écrits en 2012")
def test_maybe_pluralize(self):
- self.assertEqual(
- utils.maybe_pluralize(0, 'Article', 'Articles'),
- '0 Articles')
- self.assertEqual(
- utils.maybe_pluralize(1, 'Article', 'Articles'),
- '1 Article')
- self.assertEqual(
- utils.maybe_pluralize(2, 'Article', 'Articles'),
- '2 Articles')
+ self.assertEqual(utils.maybe_pluralize(0, "Article", "Articles"), "0 Articles")
+ self.assertEqual(utils.maybe_pluralize(1, "Article", "Articles"), "1 Article")
+ self.assertEqual(utils.maybe_pluralize(2, "Article", "Articles"), "2 Articles")
def test_temporary_locale(self):
# test with default LC category
orig_locale = locale.setlocale(locale.LC_ALL)
- with utils.temporary_locale('C'):
- self.assertEqual(locale.setlocale(locale.LC_ALL), 'C')
+ with utils.temporary_locale("C"):
+ self.assertEqual(locale.setlocale(locale.LC_ALL), "C")
self.assertEqual(locale.setlocale(locale.LC_ALL), orig_locale)
# test with custom LC category
orig_locale = locale.setlocale(locale.LC_TIME)
- with utils.temporary_locale('C', locale.LC_TIME):
- self.assertEqual(locale.setlocale(locale.LC_TIME), 'C')
+ with utils.temporary_locale("C", locale.LC_TIME):
+ self.assertEqual(locale.setlocale(locale.LC_TIME), "C")
self.assertEqual(locale.setlocale(locale.LC_TIME), orig_locale)
class TestCopy(unittest.TestCase):
- '''Tests the copy utility'''
+ """Tests the copy utility"""
def setUp(self):
- self.root_dir = mkdtemp(prefix='pelicantests.')
+ self.root_dir = mkdtemp(prefix="pelicantests.")
self.old_locale = locale.setlocale(locale.LC_ALL)
- locale.setlocale(locale.LC_ALL, 'C')
+ locale.setlocale(locale.LC_ALL, "C")
def tearDown(self):
shutil.rmtree(self.root_dir)
locale.setlocale(locale.LC_ALL, self.old_locale)
def _create_file(self, *path):
- with open(os.path.join(self.root_dir, *path), 'w') as f:
- f.write('42\n')
+ with open(os.path.join(self.root_dir, *path), "w") as f:
+ f.write("42\n")
def _create_dir(self, *path):
os.makedirs(os.path.join(self.root_dir, *path))
def _exist_file(self, *path):
path = os.path.join(self.root_dir, *path)
- self.assertTrue(os.path.isfile(path), 'File does not exist: %s' % path)
+ self.assertTrue(os.path.isfile(path), "File does not exist: %s" % path)
def _exist_dir(self, *path):
path = os.path.join(self.root_dir, *path)
- self.assertTrue(os.path.exists(path),
- 'Directory does not exist: %s' % path)
+ self.assertTrue(os.path.exists(path), "Directory does not exist: %s" % path)
def test_copy_file_same_path(self):
- self._create_file('a.txt')
- utils.copy(os.path.join(self.root_dir, 'a.txt'),
- os.path.join(self.root_dir, 'b.txt'))
- self._exist_file('b.txt')
+ self._create_file("a.txt")
+ utils.copy(
+ os.path.join(self.root_dir, "a.txt"), os.path.join(self.root_dir, "b.txt")
+ )
+ self._exist_file("b.txt")
def test_copy_file_different_path(self):
- self._create_dir('a')
- self._create_dir('b')
- self._create_file('a', 'a.txt')
- utils.copy(os.path.join(self.root_dir, 'a', 'a.txt'),
- os.path.join(self.root_dir, 'b', 'b.txt'))
- self._exist_dir('b')
- self._exist_file('b', 'b.txt')
+ self._create_dir("a")
+ self._create_dir("b")
+ self._create_file("a", "a.txt")
+ utils.copy(
+ os.path.join(self.root_dir, "a", "a.txt"),
+ os.path.join(self.root_dir, "b", "b.txt"),
+ )
+ self._exist_dir("b")
+ self._exist_file("b", "b.txt")
def test_copy_file_create_dirs(self):
- self._create_file('a.txt')
+ self._create_file("a.txt")
utils.copy(
- os.path.join(self.root_dir, 'a.txt'),
- os.path.join(self.root_dir, 'b0', 'b1', 'b2', 'b3', 'b.txt'))
- self._exist_dir('b0')
- self._exist_dir('b0', 'b1')
- self._exist_dir('b0', 'b1', 'b2')
- self._exist_dir('b0', 'b1', 'b2', 'b3')
- self._exist_file('b0', 'b1', 'b2', 'b3', 'b.txt')
+ os.path.join(self.root_dir, "a.txt"),
+ os.path.join(self.root_dir, "b0", "b1", "b2", "b3", "b.txt"),
+ )
+ self._exist_dir("b0")
+ self._exist_dir("b0", "b1")
+ self._exist_dir("b0", "b1", "b2")
+ self._exist_dir("b0", "b1", "b2", "b3")
+ self._exist_file("b0", "b1", "b2", "b3", "b.txt")
def test_copy_dir_same_path(self):
- self._create_dir('a')
- self._create_file('a', 'a.txt')
- utils.copy(os.path.join(self.root_dir, 'a'),
- os.path.join(self.root_dir, 'b'))
- self._exist_dir('b')
- self._exist_file('b', 'a.txt')
+ self._create_dir("a")
+ self._create_file("a", "a.txt")
+ utils.copy(os.path.join(self.root_dir, "a"), os.path.join(self.root_dir, "b"))
+ self._exist_dir("b")
+ self._exist_file("b", "a.txt")
def test_copy_dir_different_path(self):
- self._create_dir('a0')
- self._create_dir('a0', 'a1')
- self._create_file('a0', 'a1', 'a.txt')
- self._create_dir('b0')
- utils.copy(os.path.join(self.root_dir, 'a0', 'a1'),
- os.path.join(self.root_dir, 'b0', 'b1'))
- self._exist_dir('b0', 'b1')
- self._exist_file('b0', 'b1', 'a.txt')
+ self._create_dir("a0")
+ self._create_dir("a0", "a1")
+ self._create_file("a0", "a1", "a.txt")
+ self._create_dir("b0")
+ utils.copy(
+ os.path.join(self.root_dir, "a0", "a1"),
+ os.path.join(self.root_dir, "b0", "b1"),
+ )
+ self._exist_dir("b0", "b1")
+ self._exist_file("b0", "b1", "a.txt")
def test_copy_dir_create_dirs(self):
- self._create_dir('a')
- self._create_file('a', 'a.txt')
- utils.copy(os.path.join(self.root_dir, 'a'),
- os.path.join(self.root_dir, 'b0', 'b1', 'b2', 'b3', 'b'))
- self._exist_dir('b0')
- self._exist_dir('b0', 'b1')
- self._exist_dir('b0', 'b1', 'b2')
- self._exist_dir('b0', 'b1', 'b2', 'b3')
- self._exist_dir('b0', 'b1', 'b2', 'b3', 'b')
- self._exist_file('b0', 'b1', 'b2', 'b3', 'b', 'a.txt')
+ self._create_dir("a")
+ self._create_file("a", "a.txt")
+ utils.copy(
+ os.path.join(self.root_dir, "a"),
+ os.path.join(self.root_dir, "b0", "b1", "b2", "b3", "b"),
+ )
+ self._exist_dir("b0")
+ self._exist_dir("b0", "b1")
+ self._exist_dir("b0", "b1", "b2")
+ self._exist_dir("b0", "b1", "b2", "b3")
+ self._exist_dir("b0", "b1", "b2", "b3", "b")
+ self._exist_file("b0", "b1", "b2", "b3", "b", "a.txt")
class TestDateFormatter(unittest.TestCase):
- '''Tests that the output of DateFormatter jinja filter is same as
- utils.strftime'''
+ """Tests that the output of DateFormatter jinja filter is same as
+ utils.strftime"""
def setUp(self):
# prepare a temp content and output folder
- self.temp_content = mkdtemp(prefix='pelicantests.')
- self.temp_output = mkdtemp(prefix='pelicantests.')
+ self.temp_content = mkdtemp(prefix="pelicantests.")
+ self.temp_output = mkdtemp(prefix="pelicantests.")
# prepare a template file
- template_dir = os.path.join(self.temp_content, 'template')
- template_path = os.path.join(template_dir, 'source.html')
+ template_dir = os.path.join(self.temp_content, "template")
+ template_path = os.path.join(template_dir, "source.html")
os.makedirs(template_dir)
- with open(template_path, 'w') as template_file:
+ with open(template_path, "w") as template_file:
template_file.write('date = {{ date|strftime("%A, %d %B %Y") }}')
self.date = utils.SafeDatetime(2012, 8, 29)
@@ -666,136 +812,128 @@ class TestDateFormatter(unittest.TestCase):
shutil.rmtree(self.temp_content)
shutil.rmtree(self.temp_output)
# reset locale to default
- locale.setlocale(locale.LC_ALL, '')
+ locale.setlocale(locale.LC_ALL, "")
- @unittest.skipUnless(locale_available('fr_FR.UTF-8') or
- locale_available('French'),
- 'French locale needed')
+ @unittest.skipUnless(
+ locale_available("fr_FR.UTF-8") or locale_available("French"),
+ "French locale needed",
+ )
def test_french_strftime(self):
# This test tries to reproduce an issue that
# occurred with python3.3 under macos10 only
- temp_locale = 'French' if platform == 'win32' else 'fr_FR.UTF-8'
+ temp_locale = "French" if platform == "win32" else "fr_FR.UTF-8"
with utils.temporary_locale(temp_locale):
date = utils.SafeDatetime(2014, 8, 14)
# we compare the lower() dates since macos10 returns
# "Jeudi" for %A whereas linux reports "jeudi"
self.assertEqual(
- 'jeudi, 14 août 2014',
- utils.strftime(date, date_format="%A, %d %B %Y").lower())
+ "jeudi, 14 août 2014",
+ utils.strftime(date, date_format="%A, %d %B %Y").lower(),
+ )
df = utils.DateFormatter()
self.assertEqual(
- 'jeudi, 14 août 2014',
- df(date, date_format="%A, %d %B %Y").lower())
+ "jeudi, 14 août 2014", df(date, date_format="%A, %d %B %Y").lower()
+ )
# Let us now set the global locale to C:
- with utils.temporary_locale('C'):
+ with utils.temporary_locale("C"):
# DateFormatter should still work as expected
# since it is the whole point of DateFormatter
# (This is where pre-2014/4/15 code fails on macos10)
df_date = df(date, date_format="%A, %d %B %Y").lower()
- self.assertEqual('jeudi, 14 août 2014', df_date)
+ self.assertEqual("jeudi, 14 août 2014", df_date)
- @unittest.skipUnless(locale_available('fr_FR.UTF-8') or
- locale_available('French'),
- 'French locale needed')
+ @unittest.skipUnless(
+ locale_available("fr_FR.UTF-8") or locale_available("French"),
+ "French locale needed",
+ )
def test_french_locale(self):
- if platform == 'win32':
- locale_string = 'French'
+ if platform == "win32":
+ locale_string = "French"
else:
- locale_string = 'fr_FR.UTF-8'
+ locale_string = "fr_FR.UTF-8"
settings = read_settings(
override={
- 'LOCALE': locale_string,
- 'TEMPLATE_PAGES': {
- 'template/source.html': 'generated/file.html'
- }
- })
+ "LOCALE": locale_string,
+ "TEMPLATE_PAGES": {"template/source.html": "generated/file.html"},
+ }
+ )
generator = TemplatePagesGenerator(
- {'date': self.date}, settings,
- self.temp_content, '', self.temp_output)
- generator.env.filters.update({'strftime': utils.DateFormatter()})
+ {"date": self.date}, settings, self.temp_content, "", self.temp_output
+ )
+ generator.env.filters.update({"strftime": utils.DateFormatter()})
writer = Writer(self.temp_output, settings=settings)
generator.generate_output(writer)
- output_path = os.path.join(
- self.temp_output, 'generated', 'file.html')
+ output_path = os.path.join(self.temp_output, "generated", "file.html")
# output file has been generated
self.assertTrue(os.path.exists(output_path))
# output content is correct
with utils.pelican_open(output_path) as output_file:
- self.assertEqual(output_file,
- utils.strftime(self.date, 'date = %A, %d %B %Y'))
+ self.assertEqual(
+ output_file, utils.strftime(self.date, "date = %A, %d %B %Y")
+ )
- @unittest.skipUnless(locale_available('tr_TR.UTF-8') or
- locale_available('Turkish'),
- 'Turkish locale needed')
+ @unittest.skipUnless(
+ locale_available("tr_TR.UTF-8") or locale_available("Turkish"),
+ "Turkish locale needed",
+ )
def test_turkish_locale(self):
- if platform == 'win32':
- locale_string = 'Turkish'
+ if platform == "win32":
+ locale_string = "Turkish"
else:
- locale_string = 'tr_TR.UTF-8'
+ locale_string = "tr_TR.UTF-8"
settings = read_settings(
override={
- 'LOCALE': locale_string,
- 'TEMPLATE_PAGES': {
- 'template/source.html': 'generated/file.html'
- }
- })
+ "LOCALE": locale_string,
+ "TEMPLATE_PAGES": {"template/source.html": "generated/file.html"},
+ }
+ )
generator = TemplatePagesGenerator(
- {'date': self.date}, settings,
- self.temp_content, '', self.temp_output)
- generator.env.filters.update({'strftime': utils.DateFormatter()})
+ {"date": self.date}, settings, self.temp_content, "", self.temp_output
+ )
+ generator.env.filters.update({"strftime": utils.DateFormatter()})
writer = Writer(self.temp_output, settings=settings)
generator.generate_output(writer)
- output_path = os.path.join(
- self.temp_output, 'generated', 'file.html')
+ output_path = os.path.join(self.temp_output, "generated", "file.html")
# output file has been generated
self.assertTrue(os.path.exists(output_path))
# output content is correct
with utils.pelican_open(output_path) as output_file:
- self.assertEqual(output_file,
- utils.strftime(self.date, 'date = %A, %d %B %Y'))
+ self.assertEqual(
+ output_file, utils.strftime(self.date, "date = %A, %d %B %Y")
+ )
class TestSanitisedJoin(unittest.TestCase):
def test_detect_parent_breakout(self):
with self.assertRaisesRegex(
- RuntimeError,
- "Attempted to break out of output directory to "
- "(.*?:)?/foo/test"): # (.*?:)? accounts for Windows root
- utils.sanitised_join(
- "/foo/bar",
- "../test"
- )
+ RuntimeError,
+ "Attempted to break out of output directory to " "(.*?:)?/foo/test",
+ ): # (.*?:)? accounts for Windows root
+ utils.sanitised_join("/foo/bar", "../test")
def test_detect_root_breakout(self):
with self.assertRaisesRegex(
- RuntimeError,
- "Attempted to break out of output directory to "
- "(.*?:)?/test"): # (.*?:)? accounts for Windows root
- utils.sanitised_join(
- "/foo/bar",
- "/test"
- )
+ RuntimeError,
+ "Attempted to break out of output directory to " "(.*?:)?/test",
+ ): # (.*?:)? accounts for Windows root
+ utils.sanitised_join("/foo/bar", "/test")
def test_pass_deep_subpaths(self):
self.assertEqual(
- utils.sanitised_join(
- "/foo/bar",
- "test"
- ),
- utils.posixize_path(
- os.path.abspath(os.path.join("/foo/bar", "test")))
+ utils.sanitised_join("/foo/bar", "test"),
+ utils.posixize_path(os.path.abspath(os.path.join("/foo/bar", "test"))),
)
@@ -812,7 +950,7 @@ class TestMemoized(unittest.TestCase):
container = Container()
with unittest.mock.patch.object(
- container, "_get", side_effect=lambda x: x
+ container, "_get", side_effect=lambda x: x
) as get_mock:
self.assertEqual("foo", container.get("foo"))
get_mock.assert_called_once_with("foo")
diff --git a/pelican/tools/pelican_import.py b/pelican/tools/pelican_import.py
index 95e196ba..27102f38 100755
--- a/pelican/tools/pelican_import.py
+++ b/pelican/tools/pelican_import.py
@@ -47,74 +47,69 @@ def decode_wp_content(content, br=True):
pre_index += 1
content = content + last_pre
-    content = re.sub(r'<br />\s*<br />', "\n\n", content)
- allblocks = ('(?:table|thead|tfoot|caption|col|colgroup|tbody|tr|'
- 'td|th|div|dl|dd|dt|ul|ol|li|pre|select|option|form|'
- 'map|area|blockquote|address|math|style|p|h[1-6]|hr|'
- 'fieldset|noscript|samp|legend|section|article|aside|'
- 'hgroup|header|footer|nav|figure|figcaption|details|'
- 'menu|summary)')
- content = re.sub(r'(<' + allblocks + r'[^>]*>)', "\n\\1", content)
-    content = re.sub(r'(</' + allblocks + r'>)', "\\1\n\n", content)
+ content = re.sub(r" \s* ", "\n\n", content)
+ allblocks = (
+ "(?:table|thead|tfoot|caption|col|colgroup|tbody|tr|"
+ "td|th|div|dl|dd|dt|ul|ol|li|pre|select|option|form|"
+ "map|area|blockquote|address|math|style|p|h[1-6]|hr|"
+ "fieldset|noscript|samp|legend|section|article|aside|"
+ "hgroup|header|footer|nav|figure|figcaption|details|"
+ "menu|summary)"
+ )
+ content = re.sub(r"(<" + allblocks + r"[^>]*>)", "\n\\1", content)
+ content = re.sub(r"(" + allblocks + r">)", "\\1\n\n", content)
# content = content.replace("\r\n", "\n")
if " inside object/embed
- content = re.sub(r'\s* ]*)>\s*', " ", content)
- content = re.sub(r'\s*\s*', '', content)
+ content = re.sub(r"\s* ]*)>\s*", " ", content)
+ content = re.sub(r"\s*\s*", "", content)
# content = re.sub(r'/\n\n+/', '\n\n', content)
- pgraphs = filter(lambda s: s != "", re.split(r'\n\s*\n', content))
+ pgraphs = filter(lambda s: s != "", re.split(r"\n\s*\n", content))
content = ""
for p in pgraphs:
content = content + "" + p.strip() + "
\n"
# under certain strange conditions it could create
# a P of entirely whitespace
-    content = re.sub(r'<p>\s*</p>', '', content)
-    content = re.sub(
-        r'<p>([^<]+)</(div|address|form)>',
-        "<p>\\1</p></\\2>",
-        content)
+    content = re.sub(r"<p>\s*</p>", "", content)
+    content = re.sub(r"<p>([^<]+)</(div|address|form)>", "<p>\\1</p></\\2>", content)
     # don't wrap <p> tags
-    content = re.sub(
-        r'<p>\s*(</?' + allblocks + r'[^>]*>)\s*</p>',
-        "\\1",
-        content)
+    content = re.sub(r"<p>\s*(</?" + allblocks + r"[^>]*>)\s*</p>", "\\1", content)
# problem with nested lists
-    content = re.sub(r'<p>(<li.*)</p>', "\\1", content)
-    content = re.sub(r'<p><blockquote([^>]*)>', "<blockquote\\1><p>", content)
-    content = content.replace('</blockquote></p>', '</p></blockquote>')
-    content = re.sub(r'<p>\s*(</?' + allblocks + '[^>]*>)', "\\1", content)
-    content = re.sub(r'(</?' + allblocks + r'[^>]*>)\s*</p>', "\\1", content)
+    content = re.sub(r"<p>(<li.*)</p>", "\\1", content)
+    content = re.sub(r"<p><blockquote([^>]*)>", "<blockquote\\1><p>", content)
+    content = content.replace("</blockquote></p>", "</p></blockquote>")
+    content = re.sub(r"<p>\s*(</?" + allblocks + "[^>]*>)", "\\1", content)
+    content = re.sub(r"(</?" + allblocks + r"[^>]*>)\s*</p>", "\\1", content)
if br:
+
def _preserve_newline(match):
             return match.group(0).replace("\n", "<WPPreserveNewline />")
- content = re.sub(
- r'/<(script|style).*?<\/\\1>/s',
- _preserve_newline,
- content)
+
+ content = re.sub(r"/<(script|style).*?<\/\\1>/s", _preserve_newline, content)
# optionally make line breaks
-        content = re.sub(r'(?<!<br />)\s*\n', "<br />\n", content)
+        content = re.sub(r"(?<!<br />)\s*\n", "<br />\n", content)
         content = content.replace("<WPPreserveNewline />", "\n")
+ content = re.sub(r"(?" + allblocks + r"[^>]*>)\s* ", "\\1", content)
content = re.sub(
- r'(?' + allblocks + r'[^>]*>)\s* ', "\\1",
- content)
- content = re.sub(
- r' (\s*?(?:p|li|div|dl|dd|dt|th|pre|td|ul|ol)[^>]*>)',
- '\\1',
- content)
- content = re.sub(r'\n', "", content)
+ r" (\s*?(?:p|li|div|dl|dd|dt|th|pre|td|ul|ol)[^>]*>)", "\\1", content
+ )
+ content = re.sub(r"\n", "", content)
if pre_tags:
+
def _multi_replace(dic, string):
- pattern = r'|'.join(map(re.escape, dic.keys()))
+ pattern = r"|".join(map(re.escape, dic.keys()))
return re.sub(pattern, lambda m: dic[m.group()], string)
+
content = _multi_replace(pre_tags, content)
     # convert [caption] tags into <figure>
     content = re.sub(
-        r'\[caption(?:.*?)(?:caption=\"(.*?)\")?\]'
-        r'((?:\<a(?:.*?)\>)?(?:\<img(?:.*?)\>)(?:\<\/a\>)?)\s?(.*?)\[\/caption\]',
-        r'<figure>\n\2\n<figcaption>\1\3</figcaption>\n</figure>',
-        content)
+        r"\[caption(?:.*?)(?:caption=\"(.*?)\")?\]"
+        r"((?:\<a(?:.*?)\>)?(?:\<img(?:.*?)\>)(?:\<\/a\>)?)\s?(.*?)\[\/caption\]",
+        r"<figure>\n\2\n<figcaption>\1\3</figcaption>\n</figure>",
+ content,
+ )
return content
@@ -124,10 +119,12 @@ def xml_to_soup(xml):
try:
from bs4 import BeautifulSoup
except ImportError:
- error = ('Missing dependency "BeautifulSoup4" and "lxml" required to '
- 'import XML files.')
+ error = (
+ 'Missing dependency "BeautifulSoup4" and "lxml" required to '
+ "import XML files."
+ )
sys.exit(error)
- with open(xml, encoding='utf-8') as infile:
+ with open(xml, encoding="utf-8") as infile:
xmlfile = infile.read()
soup = BeautifulSoup(xmlfile, "xml")
return soup
@@ -144,111 +141,125 @@ def wp2fields(xml, wp_custpost=False):
"""Opens a wordpress XML file, and yield Pelican fields"""
soup = xml_to_soup(xml)
- items = soup.rss.channel.findAll('item')
+ items = soup.rss.channel.findAll("item")
for item in items:
-
- if item.find('status').string in ["publish", "draft"]:
-
+ if item.find("status").string in ["publish", "draft"]:
try:
# Use HTMLParser due to issues with BeautifulSoup 3
title = unescape(item.title.contents[0])
except IndexError:
- title = 'No title [%s]' % item.find('post_name').string
+ title = "No title [%s]" % item.find("post_name").string
logger.warning('Post "%s" is lacking a proper title', title)
- post_name = item.find('post_name').string
- post_id = item.find('post_id').string
+ post_name = item.find("post_name").string
+ post_id = item.find("post_id").string
filename = get_filename(post_name, post_id)
- content = item.find('encoded').string
- raw_date = item.find('post_date').string
- if raw_date == '0000-00-00 00:00:00':
+ content = item.find("encoded").string
+ raw_date = item.find("post_date").string
+ if raw_date == "0000-00-00 00:00:00":
date = None
else:
- date_object = SafeDatetime.strptime(
- raw_date, '%Y-%m-%d %H:%M:%S')
- date = date_object.strftime('%Y-%m-%d %H:%M')
- author = item.find('creator').string
+ date_object = SafeDatetime.strptime(raw_date, "%Y-%m-%d %H:%M:%S")
+ date = date_object.strftime("%Y-%m-%d %H:%M")
+ author = item.find("creator").string
- categories = [cat.string for cat
- in item.findAll('category', {'domain': 'category'})]
+ categories = [
+ cat.string for cat in item.findAll("category", {"domain": "category"})
+ ]
- tags = [tag.string for tag
- in item.findAll('category', {'domain': 'post_tag'})]
+ tags = [
+ tag.string for tag in item.findAll("category", {"domain": "post_tag"})
+ ]
# To publish a post the status should be 'published'
- status = 'published' if item.find('status').string == "publish" \
- else item.find('status').string
+ status = (
+ "published"
+ if item.find("status").string == "publish"
+ else item.find("status").string
+ )
- kind = 'article'
- post_type = item.find('post_type').string
- if post_type == 'page':
- kind = 'page'
+ kind = "article"
+ post_type = item.find("post_type").string
+ if post_type == "page":
+ kind = "page"
elif wp_custpost:
- if post_type == 'post':
+ if post_type == "post":
pass
# Old behaviour was to name everything not a page as an
# article.Theoretically all attachments have status == inherit
# so no attachments should be here. But this statement is to
# maintain existing behaviour in case that doesn't hold true.
- elif post_type == 'attachment':
+ elif post_type == "attachment":
pass
else:
kind = post_type
- yield (title, content, filename, date, author, categories,
- tags, status, kind, 'wp-html')
+ yield (
+ title,
+ content,
+ filename,
+ date,
+ author,
+ categories,
+ tags,
+ status,
+ kind,
+ "wp-html",
+ )
def blogger2fields(xml):
"""Opens a blogger XML file, and yield Pelican fields"""
soup = xml_to_soup(xml)
- entries = soup.feed.findAll('entry')
+ entries = soup.feed.findAll("entry")
for entry in entries:
raw_kind = entry.find(
- 'category', {'scheme': 'http://schemas.google.com/g/2005#kind'}
- ).get('term')
- if raw_kind == 'http://schemas.google.com/blogger/2008/kind#post':
- kind = 'article'
- elif raw_kind == 'http://schemas.google.com/blogger/2008/kind#comment':
- kind = 'comment'
- elif raw_kind == 'http://schemas.google.com/blogger/2008/kind#page':
- kind = 'page'
+ "category", {"scheme": "http://schemas.google.com/g/2005#kind"}
+ ).get("term")
+ if raw_kind == "http://schemas.google.com/blogger/2008/kind#post":
+ kind = "article"
+ elif raw_kind == "http://schemas.google.com/blogger/2008/kind#comment":
+ kind = "comment"
+ elif raw_kind == "http://schemas.google.com/blogger/2008/kind#page":
+ kind = "page"
else:
continue
try:
- assert kind != 'comment'
- filename = entry.find('link', {'rel': 'alternate'})['href']
+ assert kind != "comment"
+ filename = entry.find("link", {"rel": "alternate"})["href"]
filename = os.path.splitext(os.path.basename(filename))[0]
except (AssertionError, TypeError, KeyError):
- filename = entry.find('id').string.split('.')[-1]
+ filename = entry.find("id").string.split(".")[-1]
- title = entry.find('title').string or ''
+ title = entry.find("title").string or ""
- content = entry.find('content').string
- raw_date = entry.find('published').string
- if hasattr(SafeDatetime, 'fromisoformat'):
+ content = entry.find("content").string
+ raw_date = entry.find("published").string
+ if hasattr(SafeDatetime, "fromisoformat"):
date_object = SafeDatetime.fromisoformat(raw_date)
else:
- date_object = SafeDatetime.strptime(
- raw_date[:23], '%Y-%m-%dT%H:%M:%S.%f')
- date = date_object.strftime('%Y-%m-%d %H:%M')
- author = entry.find('author').find('name').string
+ date_object = SafeDatetime.strptime(raw_date[:23], "%Y-%m-%dT%H:%M:%S.%f")
+ date = date_object.strftime("%Y-%m-%d %H:%M")
+ author = entry.find("author").find("name").string
# blogger posts only have tags, no category
- tags = [tag.get('term') for tag in entry.findAll(
- 'category', {'scheme': 'http://www.blogger.com/atom/ns#'})]
+ tags = [
+ tag.get("term")
+ for tag in entry.findAll(
+ "category", {"scheme": "http://www.blogger.com/atom/ns#"}
+ )
+ ]
         # Drafts have <app:control><app:draft>yes</app:draft></app:control>
- status = 'published'
+ status = "published"
try:
- if entry.find('control').find('draft').string == 'yes':
- status = 'draft'
+ if entry.find("control").find("draft").string == "yes":
+ status = "draft"
except AttributeError:
pass
- yield (title, content, filename, date, author, None, tags, status,
- kind, 'html')
+ yield (title, content, filename, date, author, None, tags, status, kind, "html")
def dc2fields(file):
@@ -256,9 +267,11 @@ def dc2fields(file):
try:
from bs4 import BeautifulSoup
except ImportError:
- error = ('Missing dependency '
- '"BeautifulSoup4" and "lxml" required '
- 'to import Dotclear files.')
+ error = (
+ "Missing dependency "
+ '"BeautifulSoup4" and "lxml" required '
+ "to import Dotclear files."
+ )
sys.exit(error)
in_cat = False
@@ -266,15 +279,14 @@ def dc2fields(file):
category_list = {}
posts = []
- with open(file, encoding='utf-8') as f:
-
+ with open(file, encoding="utf-8") as f:
for line in f:
# remove final \n
line = line[:-1]
- if line.startswith('[category'):
+ if line.startswith("[category"):
in_cat = True
- elif line.startswith('[post'):
+ elif line.startswith("[post"):
in_post = True
elif in_cat:
fields = line.split('","')
@@ -294,7 +306,7 @@ def dc2fields(file):
print("%i posts read." % len(posts))
- subs = DEFAULT_CONFIG['SLUG_REGEX_SUBSTITUTIONS']
+ subs = DEFAULT_CONFIG["SLUG_REGEX_SUBSTITUTIONS"]
for post in posts:
fields = post.split('","')
@@ -329,44 +341,39 @@ def dc2fields(file):
# redirect_url = fields[28][:-1]
# remove seconds
- post_creadt = ':'.join(post_creadt.split(':')[0:2])
+ post_creadt = ":".join(post_creadt.split(":")[0:2])
- author = ''
+ author = ""
categories = []
tags = []
if cat_id:
- categories = [category_list[id].strip() for id
- in cat_id.split(',')]
+ categories = [category_list[id].strip() for id in cat_id.split(",")]
# Get tags related to a post
- tag = (post_meta.replace('{', '')
- .replace('}', '')
- .replace('a:1:s:3:\\"tag\\";a:', '')
- .replace('a:0:', ''))
+ tag = (
+ post_meta.replace("{", "")
+ .replace("}", "")
+ .replace('a:1:s:3:\\"tag\\";a:', "")
+ .replace("a:0:", "")
+ )
if len(tag) > 1:
if int(len(tag[:1])) == 1:
newtag = tag.split('"')[1]
tags.append(
- BeautifulSoup(
- newtag,
- 'xml'
- )
+ BeautifulSoup(newtag, "xml")
# bs4 always outputs UTF-8
- .decode('utf-8')
+ .decode("utf-8")
)
else:
i = 1
j = 1
- while (i <= int(tag[:1])):
- newtag = tag.split('"')[j].replace('\\', '')
+ while i <= int(tag[:1]):
+ newtag = tag.split('"')[j].replace("\\", "")
tags.append(
- BeautifulSoup(
- newtag,
- 'xml'
- )
+ BeautifulSoup(newtag, "xml")
# bs4 always outputs UTF-8
- .decode('utf-8')
+ .decode("utf-8")
)
i = i + 1
if j < int(tag[:1]) * 2:
@@ -381,116 +388,149 @@ def dc2fields(file):
content = post_excerpt + post_content
else:
content = post_excerpt_xhtml + post_content_xhtml
- content = content.replace('\\n', '')
+ content = content.replace("\\n", "")
post_format = "html"
- kind = 'article' # TODO: Recognise pages
- status = 'published' # TODO: Find a way for draft posts
+ kind = "article" # TODO: Recognise pages
+ status = "published" # TODO: Find a way for draft posts
- yield (post_title, content, slugify(post_title, regex_subs=subs),
- post_creadt, author, categories, tags, status, kind,
- post_format)
+ yield (
+ post_title,
+ content,
+ slugify(post_title, regex_subs=subs),
+ post_creadt,
+ author,
+ categories,
+ tags,
+ status,
+ kind,
+ post_format,
+ )
def _get_tumblr_posts(api_key, blogname, offset=0):
import json
import urllib.request as urllib_request
- url = ("https://api.tumblr.com/v2/blog/%s.tumblr.com/"
- "posts?api_key=%s&offset=%d&filter=raw") % (
- blogname, api_key, offset)
+
+ url = (
+ "https://api.tumblr.com/v2/blog/%s.tumblr.com/"
+ "posts?api_key=%s&offset=%d&filter=raw"
+ ) % (blogname, api_key, offset)
request = urllib_request.Request(url)
handle = urllib_request.urlopen(request)
- posts = json.loads(handle.read().decode('utf-8'))
- return posts.get('response').get('posts')
+ posts = json.loads(handle.read().decode("utf-8"))
+ return posts.get("response").get("posts")
def tumblr2fields(api_key, blogname):
- """ Imports Tumblr posts (API v2)"""
+ """Imports Tumblr posts (API v2)"""
offset = 0
posts = _get_tumblr_posts(api_key, blogname, offset)
- subs = DEFAULT_CONFIG['SLUG_REGEX_SUBSTITUTIONS']
+ subs = DEFAULT_CONFIG["SLUG_REGEX_SUBSTITUTIONS"]
while len(posts) > 0:
for post in posts:
- title = \
- post.get('title') or \
- post.get('source_title') or \
- post.get('type').capitalize()
- slug = post.get('slug') or slugify(title, regex_subs=subs)
- tags = post.get('tags')
- timestamp = post.get('timestamp')
+ title = (
+ post.get("title")
+ or post.get("source_title")
+ or post.get("type").capitalize()
+ )
+ slug = post.get("slug") or slugify(title, regex_subs=subs)
+ tags = post.get("tags")
+ timestamp = post.get("timestamp")
date = SafeDatetime.fromtimestamp(
int(timestamp), tz=datetime.timezone.utc
).strftime("%Y-%m-%d %H:%M:%S%z")
- slug = SafeDatetime.fromtimestamp(
- int(timestamp), tz=datetime.timezone.utc
- ).strftime("%Y-%m-%d-") + slug
- format = post.get('format')
- content = post.get('body')
- type = post.get('type')
- if type == 'photo':
- if format == 'markdown':
-                    fmtstr = '![%s](%s)'
+ slug = (
+ SafeDatetime.fromtimestamp(
+ int(timestamp), tz=datetime.timezone.utc
+ ).strftime("%Y-%m-%d-")
+ + slug
+ )
+ format = post.get("format")
+ content = post.get("body")
+ type = post.get("type")
+ if type == "photo":
+ if format == "markdown":
+ fmtstr = ""
else:
                     fmtstr = '<img alt="%s" src="%s" />'
- content = '\n'.join(
- fmtstr % (photo.get('caption'),
- photo.get('original_size').get('url'))
- for photo in post.get('photos'))
- elif type == 'quote':
- if format == 'markdown':
-                    fmtstr = '\n\n&mdash; %s'
+ content = "\n".join(
+ fmtstr
+ % (photo.get("caption"), photo.get("original_size").get("url"))
+ for photo in post.get("photos")
+ )
+ elif type == "quote":
+ if format == "markdown":
+ fmtstr = "\n\n— %s"
else:
-                    fmtstr = '&mdash; %s</p>'
- content = post.get('text') + fmtstr % post.get('source')
- elif type == 'link':
- if format == 'markdown':
- fmtstr = '[via](%s)\n\n'
+ fmtstr = "— %s
"
+ content = post.get("text") + fmtstr % post.get("source")
+ elif type == "link":
+ if format == "markdown":
+ fmtstr = "[via](%s)\n\n"
else:
                     fmtstr = '<p><a href="%s">via</a></p>\n'
- content = fmtstr % post.get('url') + post.get('description')
- elif type == 'audio':
- if format == 'markdown':
- fmtstr = '[via](%s)\n\n'
+ content = fmtstr % post.get("url") + post.get("description")
+ elif type == "audio":
+ if format == "markdown":
+ fmtstr = "[via](%s)\n\n"
else:
fmtstr = 'via
\n'
- content = fmtstr % post.get('source_url') + \
- post.get('caption') + \
- post.get('player')
- elif type == 'video':
- if format == 'markdown':
- fmtstr = '[via](%s)\n\n'
+ content = (
+ fmtstr % post.get("source_url")
+ + post.get("caption")
+ + post.get("player")
+ )
+ elif type == "video":
+ if format == "markdown":
+ fmtstr = "[via](%s)\n\n"
else:
fmtstr = 'via
\n'
- source = fmtstr % post.get('source_url')
- caption = post.get('caption')
+ source = fmtstr % post.get("source_url")
+ caption = post.get("caption")
players = [
# If embed_code is False, couldn't get the video
- player.get('embed_code') or None
- for player in post.get('player')]
+ player.get("embed_code") or None
+ for player in post.get("player")
+ ]
# If there are no embeddable players, say so, once
- if len(players) > 0 and all(
- player is None for player in players):
+ if len(players) > 0 and all(player is None for player in players):
players = "(This video isn't available anymore.)
\n"
else:
- players = '\n'.join(players)
+ players = "\n".join(players)
content = source + caption + players
- elif type == 'answer':
- title = post.get('question')
-                content = ('<p>'
-                           '<a href="%s" rel="external nofollow">%s</a>'
-                           ': %s'
-                           '</p>\n'
-                           ' %s' % (post.get('asking_name'),
-                                    post.get('asking_url'),
-                                    post.get('question'),
-                                    post.get('answer')))
+ elif type == "answer":
+ title = post.get("question")
+ content = (
+ ""
+ '%s '
+ ": %s"
+ "
\n"
+ " %s"
+ % (
+ post.get("asking_name"),
+ post.get("asking_url"),
+ post.get("question"),
+ post.get("answer"),
+ )
+ )
- content = content.rstrip() + '\n'
- kind = 'article'
- status = 'published' # TODO: Find a way for draft posts
+ content = content.rstrip() + "\n"
+ kind = "article"
+ status = "published" # TODO: Find a way for draft posts
- yield (title, content, slug, date, post.get('blog_name'), [type],
- tags, status, kind, format)
+ yield (
+ title,
+ content,
+ slug,
+ date,
+ post.get("blog_name"),
+ [type],
+ tags,
+ status,
+ kind,
+ format,
+ )
offset += len(posts)
posts = _get_tumblr_posts(api_key, blogname, offset)
@@ -499,145 +539,167 @@ def tumblr2fields(api_key, blogname):
def feed2fields(file):
"""Read a feed and yield pelican fields"""
import feedparser
+
d = feedparser.parse(file)
- subs = DEFAULT_CONFIG['SLUG_REGEX_SUBSTITUTIONS']
+ subs = DEFAULT_CONFIG["SLUG_REGEX_SUBSTITUTIONS"]
for entry in d.entries:
- date = (time.strftime('%Y-%m-%d %H:%M', entry.updated_parsed)
- if hasattr(entry, 'updated_parsed') else None)
- author = entry.author if hasattr(entry, 'author') else None
- tags = ([e['term'] for e in entry.tags]
- if hasattr(entry, 'tags') else None)
+ date = (
+ time.strftime("%Y-%m-%d %H:%M", entry.updated_parsed)
+ if hasattr(entry, "updated_parsed")
+ else None
+ )
+ author = entry.author if hasattr(entry, "author") else None
+ tags = [e["term"] for e in entry.tags] if hasattr(entry, "tags") else None
slug = slugify(entry.title, regex_subs=subs)
- kind = 'article'
- yield (entry.title, entry.description, slug, date,
- author, [], tags, None, kind, 'html')
+ kind = "article"
+ yield (
+ entry.title,
+ entry.description,
+ slug,
+ date,
+ author,
+ [],
+ tags,
+ None,
+ kind,
+ "html",
+ )
-def build_header(title, date, author, categories, tags, slug,
- status=None, attachments=None):
+def build_header(
+ title, date, author, categories, tags, slug, status=None, attachments=None
+):
"""Build a header from a list of fields"""
from docutils.utils import column_width
- header = '{}\n{}\n'.format(title, '#' * column_width(title))
+ header = "{}\n{}\n".format(title, "#" * column_width(title))
if date:
- header += ':date: %s\n' % date
+ header += ":date: %s\n" % date
if author:
- header += ':author: %s\n' % author
+ header += ":author: %s\n" % author
if categories:
- header += ':category: %s\n' % ', '.join(categories)
+ header += ":category: %s\n" % ", ".join(categories)
if tags:
- header += ':tags: %s\n' % ', '.join(tags)
+ header += ":tags: %s\n" % ", ".join(tags)
if slug:
- header += ':slug: %s\n' % slug
+ header += ":slug: %s\n" % slug
if status:
- header += ':status: %s\n' % status
+ header += ":status: %s\n" % status
if attachments:
- header += ':attachments: %s\n' % ', '.join(attachments)
- header += '\n'
+ header += ":attachments: %s\n" % ", ".join(attachments)
+ header += "\n"
return header
-def build_asciidoc_header(title, date, author, categories, tags, slug,
- status=None, attachments=None):
+def build_asciidoc_header(
+ title, date, author, categories, tags, slug, status=None, attachments=None
+):
"""Build a header from a list of fields"""
- header = '= %s\n' % title
+ header = "= %s\n" % title
if author:
- header += '%s\n' % author
+ header += "%s\n" % author
if date:
- header += '%s\n' % date
+ header += "%s\n" % date
if categories:
- header += ':category: %s\n' % ', '.join(categories)
+ header += ":category: %s\n" % ", ".join(categories)
if tags:
- header += ':tags: %s\n' % ', '.join(tags)
+ header += ":tags: %s\n" % ", ".join(tags)
if slug:
- header += ':slug: %s\n' % slug
+ header += ":slug: %s\n" % slug
if status:
- header += ':status: %s\n' % status
+ header += ":status: %s\n" % status
if attachments:
- header += ':attachments: %s\n' % ', '.join(attachments)
- header += '\n'
+ header += ":attachments: %s\n" % ", ".join(attachments)
+ header += "\n"
return header
-def build_markdown_header(title, date, author, categories, tags,
- slug, status=None, attachments=None):
+def build_markdown_header(
+ title, date, author, categories, tags, slug, status=None, attachments=None
+):
"""Build a header from a list of fields"""
- header = 'Title: %s\n' % title
+ header = "Title: %s\n" % title
if date:
- header += 'Date: %s\n' % date
+ header += "Date: %s\n" % date
if author:
- header += 'Author: %s\n' % author
+ header += "Author: %s\n" % author
if categories:
- header += 'Category: %s\n' % ', '.join(categories)
+ header += "Category: %s\n" % ", ".join(categories)
if tags:
- header += 'Tags: %s\n' % ', '.join(tags)
+ header += "Tags: %s\n" % ", ".join(tags)
if slug:
- header += 'Slug: %s\n' % slug
+ header += "Slug: %s\n" % slug
if status:
- header += 'Status: %s\n' % status
+ header += "Status: %s\n" % status
if attachments:
- header += 'Attachments: %s\n' % ', '.join(attachments)
- header += '\n'
+ header += "Attachments: %s\n" % ", ".join(attachments)
+ header += "\n"
return header
-def get_ext(out_markup, in_markup='html'):
- if out_markup == 'asciidoc':
- ext = '.adoc'
- elif in_markup == 'markdown' or out_markup == 'markdown':
- ext = '.md'
+def get_ext(out_markup, in_markup="html"):
+ if out_markup == "asciidoc":
+ ext = ".adoc"
+ elif in_markup == "markdown" or out_markup == "markdown":
+ ext = ".md"
else:
- ext = '.rst'
+ ext = ".rst"
return ext
-def get_out_filename(output_path, filename, ext, kind,
- dirpage, dircat, categories, wp_custpost, slug_subs):
+def get_out_filename(
+ output_path,
+ filename,
+ ext,
+ kind,
+ dirpage,
+ dircat,
+ categories,
+ wp_custpost,
+ slug_subs,
+):
filename = os.path.basename(filename)
# Enforce filename restrictions for various filesystems at once; see
# https://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words
# we do not need to filter words because an extension will be appended
- filename = re.sub(r'[<>:"/\\|?*^% ]', '-', filename) # invalid chars
- filename = filename.lstrip('.') # should not start with a dot
+ filename = re.sub(r'[<>:"/\\|?*^% ]', "-", filename) # invalid chars
+ filename = filename.lstrip(".") # should not start with a dot
if not filename:
- filename = '_'
+ filename = "_"
filename = filename[:249] # allow for 5 extra characters
out_filename = os.path.join(output_path, filename + ext)
# option to put page posts in pages/ subdirectory
- if dirpage and kind == 'page':
- pages_dir = os.path.join(output_path, 'pages')
+ if dirpage and kind == "page":
+ pages_dir = os.path.join(output_path, "pages")
if not os.path.isdir(pages_dir):
os.mkdir(pages_dir)
out_filename = os.path.join(pages_dir, filename + ext)
- elif not dirpage and kind == 'page':
+ elif not dirpage and kind == "page":
pass
# option to put wp custom post types in directories with post type
# names. Custom post types can also have categories so option to
# create subdirectories with category names
- elif kind != 'article':
+ elif kind != "article":
if wp_custpost:
typename = slugify(kind, regex_subs=slug_subs)
else:
- typename = ''
- kind = 'article'
+ typename = ""
+ kind = "article"
if dircat and (len(categories) > 0):
- catname = slugify(
- categories[0], regex_subs=slug_subs, preserve_case=True)
+ catname = slugify(categories[0], regex_subs=slug_subs, preserve_case=True)
else:
- catname = ''
- out_filename = os.path.join(output_path, typename,
- catname, filename + ext)
+ catname = ""
+ out_filename = os.path.join(output_path, typename, catname, filename + ext)
if not os.path.isdir(os.path.join(output_path, typename, catname)):
os.makedirs(os.path.join(output_path, typename, catname))
# option to put files in directories with categories names
elif dircat and (len(categories) > 0):
- catname = slugify(
- categories[0], regex_subs=slug_subs, preserve_case=True)
+ catname = slugify(categories[0], regex_subs=slug_subs, preserve_case=True)
out_filename = os.path.join(output_path, catname, filename + ext)
if not os.path.isdir(os.path.join(output_path, catname)):
os.mkdir(os.path.join(output_path, catname))
@@ -650,18 +712,19 @@ def get_attachments(xml):
of the attachment_urls
"""
soup = xml_to_soup(xml)
- items = soup.rss.channel.findAll('item')
+ items = soup.rss.channel.findAll("item")
names = {}
attachments = []
for item in items:
- kind = item.find('post_type').string
- post_name = item.find('post_name').string
- post_id = item.find('post_id').string
+ kind = item.find("post_type").string
+ post_name = item.find("post_name").string
+ post_id = item.find("post_id").string
- if kind == 'attachment':
- attachments.append((item.find('post_parent').string,
- item.find('attachment_url').string))
+ if kind == "attachment":
+ attachments.append(
+ (item.find("post_parent").string, item.find("attachment_url").string)
+ )
else:
filename = get_filename(post_name, post_id)
names[post_id] = filename
@@ -686,23 +749,23 @@ def download_attachments(output_path, urls):
path = urlparse(url).path
# teardown path and rebuild to negate any errors with
# os.path.join and leading /'s
- path = path.split('/')
+ path = path.split("/")
filename = path.pop(-1)
- localpath = ''
+ localpath = ""
for item in path:
- if sys.platform != 'win32' or ':' not in item:
+ if sys.platform != "win32" or ":" not in item:
localpath = os.path.join(localpath, item)
full_path = os.path.join(output_path, localpath)
# Generate percent-encoded URL
scheme, netloc, path, query, fragment = urlsplit(url)
- if scheme != 'file':
+ if scheme != "file":
path = quote(path)
url = urlunsplit((scheme, netloc, path, query, fragment))
if not os.path.exists(full_path):
os.makedirs(full_path)
- print('downloading {}'.format(filename))
+ print("downloading {}".format(filename))
try:
urlretrieve(url, os.path.join(full_path, filename))
locations[url] = os.path.join(localpath, filename)
@@ -713,43 +776,61 @@ def download_attachments(output_path, urls):
def is_pandoc_needed(in_markup):
- return in_markup in ('html', 'wp-html')
+ return in_markup in ("html", "wp-html")
def get_pandoc_version():
- cmd = ['pandoc', '--version']
+ cmd = ["pandoc", "--version"]
try:
output = subprocess.check_output(cmd, universal_newlines=True)
except (subprocess.CalledProcessError, OSError) as e:
logger.warning("Pandoc version unknown: %s", e)
return ()
- return tuple(int(i) for i in output.split()[1].split('.'))
+ return tuple(int(i) for i in output.split()[1].split("."))
def update_links_to_attached_files(content, attachments):
for old_url, new_path in attachments.items():
# url may occur both with http:// and https://
- http_url = old_url.replace('https://', 'http://')
- https_url = old_url.replace('http://', 'https://')
+ http_url = old_url.replace("https://", "http://")
+ https_url = old_url.replace("http://", "https://")
for url in [http_url, https_url]:
- content = content.replace(url, '{static}' + new_path)
+ content = content.replace(url, "{static}" + new_path)
return content
def fields2pelican(
- fields, out_markup, output_path,
- dircat=False, strip_raw=False, disable_slugs=False,
- dirpage=False, filename_template=None, filter_author=None,
- wp_custpost=False, wp_attach=False, attachments=None):
-
+ fields,
+ out_markup,
+ output_path,
+ dircat=False,
+ strip_raw=False,
+ disable_slugs=False,
+ dirpage=False,
+ filename_template=None,
+ filter_author=None,
+ wp_custpost=False,
+ wp_attach=False,
+ attachments=None,
+):
pandoc_version = get_pandoc_version()
posts_require_pandoc = []
- slug_subs = DEFAULT_CONFIG['SLUG_REGEX_SUBSTITUTIONS']
+ slug_subs = DEFAULT_CONFIG["SLUG_REGEX_SUBSTITUTIONS"]
- for (title, content, filename, date, author, categories, tags, status,
- kind, in_markup) in fields:
+ for (
+ title,
+ content,
+ filename,
+ date,
+ author,
+ categories,
+ tags,
+ status,
+ kind,
+ in_markup,
+ ) in fields:
if filter_author and filter_author != author:
continue
if is_pandoc_needed(in_markup) and not pandoc_version:
@@ -767,85 +848,120 @@ def fields2pelican(
links = None
ext = get_ext(out_markup, in_markup)
- if ext == '.adoc':
- header = build_asciidoc_header(title, date, author, categories,
- tags, slug, status, attachments)
- elif ext == '.md':
+ if ext == ".adoc":
+ header = build_asciidoc_header(
+ title, date, author, categories, tags, slug, status, attachments
+ )
+ elif ext == ".md":
header = build_markdown_header(
- title, date, author, categories, tags, slug,
- status, links.values() if links else None)
+ title,
+ date,
+ author,
+ categories,
+ tags,
+ slug,
+ status,
+ links.values() if links else None,
+ )
else:
- out_markup = 'rst'
- header = build_header(title, date, author, categories,
- tags, slug, status, links.values()
- if links else None)
+ out_markup = "rst"
+ header = build_header(
+ title,
+ date,
+ author,
+ categories,
+ tags,
+ slug,
+ status,
+ links.values() if links else None,
+ )
out_filename = get_out_filename(
- output_path, filename, ext, kind, dirpage, dircat,
- categories, wp_custpost, slug_subs)
+ output_path,
+ filename,
+ ext,
+ kind,
+ dirpage,
+ dircat,
+ categories,
+ wp_custpost,
+ slug_subs,
+ )
print(out_filename)
- if in_markup in ('html', 'wp-html'):
+ if in_markup in ("html", "wp-html"):
with tempfile.TemporaryDirectory() as tmpdir:
- html_filename = os.path.join(tmpdir, 'pandoc-input.html')
+ html_filename = os.path.join(tmpdir, "pandoc-input.html")
                # Replace newlines with paragraphs wrapped with <p> so
                # HTML is valid before conversion
- if in_markup == 'wp-html':
+ if in_markup == "wp-html":
new_content = decode_wp_content(content)
else:
paragraphs = content.splitlines()
-                    paragraphs = ['<p>{}</p>'.format(p) for p in paragraphs]
-                    new_content = ''.join(paragraphs)
-                    with open(html_filename, 'w', encoding='utf-8') as fp:
+                    paragraphs = ["<p>{}</p>".format(p) for p in paragraphs]
+ new_content = "".join(paragraphs)
+ with open(html_filename, "w", encoding="utf-8") as fp:
fp.write(new_content)
if pandoc_version < (2,):
- parse_raw = '--parse-raw' if not strip_raw else ''
- wrap_none = '--wrap=none' \
- if pandoc_version >= (1, 16) else '--no-wrap'
- cmd = ('pandoc --normalize {0} --from=html'
- ' --to={1} {2} -o "{3}" "{4}"')
- cmd = cmd.format(parse_raw,
- out_markup if out_markup != 'markdown' else "gfm",
- wrap_none,
- out_filename, html_filename)
+ parse_raw = "--parse-raw" if not strip_raw else ""
+ wrap_none = (
+ "--wrap=none" if pandoc_version >= (1, 16) else "--no-wrap"
+ )
+ cmd = (
+ "pandoc --normalize {0} --from=html"
+ ' --to={1} {2} -o "{3}" "{4}"'
+ )
+ cmd = cmd.format(
+ parse_raw,
+ out_markup if out_markup != "markdown" else "gfm",
+ wrap_none,
+ out_filename,
+ html_filename,
+ )
else:
- from_arg = '-f html+raw_html' if not strip_raw else '-f html'
- cmd = ('pandoc {0} --to={1}-smart --wrap=none -o "{2}" "{3}"')
- cmd = cmd.format(from_arg,
- out_markup if out_markup != 'markdown' else "gfm",
- out_filename, html_filename)
+ from_arg = "-f html+raw_html" if not strip_raw else "-f html"
+ cmd = 'pandoc {0} --to={1}-smart --wrap=none -o "{2}" "{3}"'
+ cmd = cmd.format(
+ from_arg,
+ out_markup if out_markup != "markdown" else "gfm",
+ out_filename,
+ html_filename,
+ )
try:
rc = subprocess.call(cmd, shell=True)
if rc < 0:
- error = 'Child was terminated by signal %d' % -rc
+ error = "Child was terminated by signal %d" % -rc
exit(error)
elif rc > 0:
- error = 'Please, check your Pandoc installation.'
+ error = "Please, check your Pandoc installation."
exit(error)
except OSError as e:
- error = 'Pandoc execution failed: %s' % e
+ error = "Pandoc execution failed: %s" % e
exit(error)
- with open(out_filename, encoding='utf-8') as fs:
+ with open(out_filename, encoding="utf-8") as fs:
content = fs.read()
- if out_markup == 'markdown':
+ if out_markup == "markdown":
                # In markdown, to insert a <br />, end a line with two
# or more spaces & then a end-of-line
-                content = content.replace('\\\n ', '  \n')
-                content = content.replace('\\\n', '  \n')
+                content = content.replace("\\\n ", "  \n")
+                content = content.replace("\\\n", "  \n")
if wp_attach and links:
content = update_links_to_attached_files(content, links)
- with open(out_filename, 'w', encoding='utf-8') as fs:
+ with open(out_filename, "w", encoding="utf-8") as fs:
fs.write(header + content)
if posts_require_pandoc:
- logger.error("Pandoc must be installed to import the following posts:"
- "\n {}".format("\n ".join(posts_require_pandoc)))
+ logger.error(
+ "Pandoc must be installed to import the following posts:" "\n {}".format(
+ "\n ".join(posts_require_pandoc)
+ )
+ )
if wp_attach and attachments and None in attachments:
print("downloading attachments that don't have a parent post")
@@ -856,111 +972,136 @@ def fields2pelican(
def main():
parser = argparse.ArgumentParser(
description="Transform feed, Blogger, Dotclear, Tumblr, or "
- "WordPress files into reST (rst) or Markdown (md) files. "
- "Be sure to have pandoc installed.",
- formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ "WordPress files into reST (rst) or Markdown (md) files. "
+ "Be sure to have pandoc installed.",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ )
+ parser.add_argument(dest="input", help="The input file to read")
parser.add_argument(
- dest='input', help='The input file to read')
+ "--blogger", action="store_true", dest="blogger", help="Blogger XML export"
+ )
parser.add_argument(
- '--blogger', action='store_true', dest='blogger',
- help='Blogger XML export')
+ "--dotclear", action="store_true", dest="dotclear", help="Dotclear export"
+ )
parser.add_argument(
- '--dotclear', action='store_true', dest='dotclear',
- help='Dotclear export')
+ "--tumblr", action="store_true", dest="tumblr", help="Tumblr export"
+ )
parser.add_argument(
- '--tumblr', action='store_true', dest='tumblr',
- help='Tumblr export')
+ "--wpfile", action="store_true", dest="wpfile", help="Wordpress XML export"
+ )
parser.add_argument(
- '--wpfile', action='store_true', dest='wpfile',
- help='Wordpress XML export')
+ "--feed", action="store_true", dest="feed", help="Feed to parse"
+ )
parser.add_argument(
- '--feed', action='store_true', dest='feed',
- help='Feed to parse')
+ "-o", "--output", dest="output", default="content", help="Output path"
+ )
parser.add_argument(
- '-o', '--output', dest='output', default='content',
- help='Output path')
+ "-m",
+ "--markup",
+ dest="markup",
+ default="rst",
+ help="Output markup format (supports rst & markdown)",
+ )
parser.add_argument(
- '-m', '--markup', dest='markup', default='rst',
- help='Output markup format (supports rst & markdown)')
+ "--dir-cat",
+ action="store_true",
+ dest="dircat",
+ help="Put files in directories with categories name",
+ )
parser.add_argument(
- '--dir-cat', action='store_true', dest='dircat',
- help='Put files in directories with categories name')
+ "--dir-page",
+ action="store_true",
+ dest="dirpage",
+ help=(
+ 'Put files recognised as pages in "pages/" sub-directory'
+ " (blogger and wordpress import only)"
+ ),
+ )
parser.add_argument(
- '--dir-page', action='store_true', dest='dirpage',
- help=('Put files recognised as pages in "pages/" sub-directory'
- ' (blogger and wordpress import only)'))
+ "--filter-author",
+ dest="author",
+ help="Import only post from the specified author",
+ )
parser.add_argument(
- '--filter-author', dest='author',
- help='Import only post from the specified author')
- parser.add_argument(
- '--strip-raw', action='store_true', dest='strip_raw',
+ "--strip-raw",
+ action="store_true",
+ dest="strip_raw",
help="Strip raw HTML code that can't be converted to "
- "markup such as flash embeds or iframes (wordpress import only)")
+ "markup such as flash embeds or iframes (wordpress import only)",
+ )
parser.add_argument(
- '--wp-custpost', action='store_true',
- dest='wp_custpost',
- help='Put wordpress custom post types in directories. If used with '
- '--dir-cat option directories will be created as '
- '/post_type/category/ (wordpress import only)')
+ "--wp-custpost",
+ action="store_true",
+ dest="wp_custpost",
+ help="Put wordpress custom post types in directories. If used with "
+ "--dir-cat option directories will be created as "
+ "/post_type/category/ (wordpress import only)",
+ )
parser.add_argument(
- '--wp-attach', action='store_true', dest='wp_attach',
- help='(wordpress import only) Download files uploaded to wordpress as '
- 'attachments. Files will be added to posts as a list in the post '
- 'header. All files will be downloaded, even if '
- "they aren't associated with a post. Files will be downloaded "
- 'with their original path inside the output directory. '
- 'e.g. output/wp-uploads/date/postname/file.jpg '
- '-- Requires an internet connection --')
+ "--wp-attach",
+ action="store_true",
+ dest="wp_attach",
+ help="(wordpress import only) Download files uploaded to wordpress as "
+ "attachments. Files will be added to posts as a list in the post "
+ "header. All files will be downloaded, even if "
+ "they aren't associated with a post. Files will be downloaded "
+ "with their original path inside the output directory. "
+ "e.g. output/wp-uploads/date/postname/file.jpg "
+ "-- Requires an internet connection --",
+ )
parser.add_argument(
- '--disable-slugs', action='store_true',
- dest='disable_slugs',
- help='Disable storing slugs from imported posts within output. '
- 'With this disabled, your Pelican URLs may not be consistent '
- 'with your original posts.')
+ "--disable-slugs",
+ action="store_true",
+ dest="disable_slugs",
+ help="Disable storing slugs from imported posts within output. "
+ "With this disabled, your Pelican URLs may not be consistent "
+ "with your original posts.",
+ )
parser.add_argument(
- '-b', '--blogname', dest='blogname',
- help="Blog name (Tumblr import only)")
+ "-b", "--blogname", dest="blogname", help="Blog name (Tumblr import only)"
+ )
args = parser.parse_args()
input_type = None
if args.blogger:
- input_type = 'blogger'
+ input_type = "blogger"
elif args.dotclear:
- input_type = 'dotclear'
+ input_type = "dotclear"
elif args.tumblr:
- input_type = 'tumblr'
+ input_type = "tumblr"
elif args.wpfile:
- input_type = 'wordpress'
+ input_type = "wordpress"
elif args.feed:
- input_type = 'feed'
+ input_type = "feed"
else:
- error = ('You must provide either --blogger, --dotclear, '
- '--tumblr, --wpfile or --feed options')
+ error = (
+ "You must provide either --blogger, --dotclear, "
+ "--tumblr, --wpfile or --feed options"
+ )
exit(error)
if not os.path.exists(args.output):
try:
os.mkdir(args.output)
except OSError:
- error = 'Unable to create the output folder: ' + args.output
+ error = "Unable to create the output folder: " + args.output
exit(error)
- if args.wp_attach and input_type != 'wordpress':
- error = ('You must be importing a wordpress xml '
- 'to use the --wp-attach option')
+ if args.wp_attach and input_type != "wordpress":
+ error = "You must be importing a wordpress xml " "to use the --wp-attach option"
exit(error)
- if input_type == 'blogger':
+ if input_type == "blogger":
fields = blogger2fields(args.input)
- elif input_type == 'dotclear':
+ elif input_type == "dotclear":
fields = dc2fields(args.input)
- elif input_type == 'tumblr':
+ elif input_type == "tumblr":
fields = tumblr2fields(args.input, args.blogname)
- elif input_type == 'wordpress':
+ elif input_type == "wordpress":
fields = wp2fields(args.input, args.wp_custpost or False)
- elif input_type == 'feed':
+ elif input_type == "feed":
fields = feed2fields(args.input)
if args.wp_attach:
@@ -970,12 +1111,16 @@ def main():
# init logging
init()
- fields2pelican(fields, args.markup, args.output,
- dircat=args.dircat or False,
- dirpage=args.dirpage or False,
- strip_raw=args.strip_raw or False,
- disable_slugs=args.disable_slugs or False,
- filter_author=args.author,
- wp_custpost=args.wp_custpost or False,
- wp_attach=args.wp_attach or False,
- attachments=attachments or None)
+ fields2pelican(
+ fields,
+ args.markup,
+ args.output,
+ dircat=args.dircat or False,
+ dirpage=args.dirpage or False,
+ strip_raw=args.strip_raw or False,
+ disable_slugs=args.disable_slugs or False,
+ filter_author=args.author,
+ wp_custpost=args.wp_custpost or False,
+ wp_attach=args.wp_attach or False,
+ attachments=attachments or None,
+ )
diff --git a/pelican/tools/pelican_quickstart.py b/pelican/tools/pelican_quickstart.py
index 4b6d93cc..fba0c9c3 100755
--- a/pelican/tools/pelican_quickstart.py
+++ b/pelican/tools/pelican_quickstart.py
@@ -19,6 +19,7 @@ except ImportError:
try:
import tzlocal
+
if hasattr(tzlocal.get_localzone(), "zone"):
_DEFAULT_TIMEZONE = tzlocal.get_localzone().zone
else:
@@ -28,55 +29,51 @@ except ModuleNotFoundError:
from pelican import __version__
-locale.setlocale(locale.LC_ALL, '')
+locale.setlocale(locale.LC_ALL, "")
try:
_DEFAULT_LANGUAGE = locale.getlocale()[0]
except ValueError:
# Don't fail on macosx: "unknown locale: UTF-8"
_DEFAULT_LANGUAGE = None
if _DEFAULT_LANGUAGE is None:
- _DEFAULT_LANGUAGE = 'en'
+ _DEFAULT_LANGUAGE = "en"
else:
- _DEFAULT_LANGUAGE = _DEFAULT_LANGUAGE.split('_')[0]
+ _DEFAULT_LANGUAGE = _DEFAULT_LANGUAGE.split("_")[0]
-_TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
- "templates")
+_TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates")
_jinja_env = Environment(
loader=FileSystemLoader(_TEMPLATES_DIR),
trim_blocks=True,
)
-_GITHUB_PAGES_BRANCHES = {
- 'personal': 'main',
- 'project': 'gh-pages'
-}
+_GITHUB_PAGES_BRANCHES = {"personal": "main", "project": "gh-pages"}
CONF = {
- 'pelican': 'pelican',
- 'pelicanopts': '',
- 'basedir': os.curdir,
- 'ftp_host': 'localhost',
- 'ftp_user': 'anonymous',
- 'ftp_target_dir': '/',
- 'ssh_host': 'localhost',
- 'ssh_port': 22,
- 'ssh_user': 'root',
- 'ssh_target_dir': '/var/www',
- 's3_bucket': 'my_s3_bucket',
- 'cloudfiles_username': 'my_rackspace_username',
- 'cloudfiles_api_key': 'my_rackspace_api_key',
- 'cloudfiles_container': 'my_cloudfiles_container',
- 'dropbox_dir': '~/Dropbox/Public/',
- 'github_pages_branch': _GITHUB_PAGES_BRANCHES['project'],
- 'default_pagination': 10,
- 'siteurl': '',
- 'lang': _DEFAULT_LANGUAGE,
- 'timezone': _DEFAULT_TIMEZONE
+ "pelican": "pelican",
+ "pelicanopts": "",
+ "basedir": os.curdir,
+ "ftp_host": "localhost",
+ "ftp_user": "anonymous",
+ "ftp_target_dir": "/",
+ "ssh_host": "localhost",
+ "ssh_port": 22,
+ "ssh_user": "root",
+ "ssh_target_dir": "/var/www",
+ "s3_bucket": "my_s3_bucket",
+ "cloudfiles_username": "my_rackspace_username",
+ "cloudfiles_api_key": "my_rackspace_api_key",
+ "cloudfiles_container": "my_cloudfiles_container",
+ "dropbox_dir": "~/Dropbox/Public/",
+ "github_pages_branch": _GITHUB_PAGES_BRANCHES["project"],
+ "default_pagination": 10,
+ "siteurl": "",
+ "lang": _DEFAULT_LANGUAGE,
+ "timezone": _DEFAULT_TIMEZONE,
}
# url for list of valid timezones
-_TZ_URL = 'https://en.wikipedia.org/wiki/List_of_tz_database_time_zones'
+_TZ_URL = "https://en.wikipedia.org/wiki/List_of_tz_database_time_zones"
# Create a 'marked' default path, to determine if someone has supplied
@@ -90,12 +87,12 @@ _DEFAULT_PATH = _DEFAULT_PATH_TYPE(os.curdir)
def ask(question, answer=str, default=None, length=None):
if answer == str:
- r = ''
+ r = ""
while True:
if default:
- r = input('> {} [{}] '.format(question, default))
+ r = input("> {} [{}] ".format(question, default))
else:
- r = input('> {} '.format(question))
+ r = input("> {} ".format(question))
r = r.strip()
@@ -104,10 +101,10 @@ def ask(question, answer=str, default=None, length=None):
r = default
break
else:
- print('You must enter something')
+ print("You must enter something")
else:
if length and len(r) != length:
- print('Entry must be {} characters long'.format(length))
+ print("Entry must be {} characters long".format(length))
else:
break
@@ -117,18 +114,18 @@ def ask(question, answer=str, default=None, length=None):
r = None
while True:
if default is True:
- r = input('> {} (Y/n) '.format(question))
+ r = input("> {} (Y/n) ".format(question))
elif default is False:
- r = input('> {} (y/N) '.format(question))
+ r = input("> {} (y/N) ".format(question))
else:
- r = input('> {} (y/n) '.format(question))
+ r = input("> {} (y/n) ".format(question))
r = r.strip().lower()
- if r in ('y', 'yes'):
+ if r in ("y", "yes"):
r = True
break
- elif r in ('n', 'no'):
+ elif r in ("n", "no"):
r = False
break
elif not r:
@@ -141,9 +138,9 @@ def ask(question, answer=str, default=None, length=None):
r = None
while True:
if default:
- r = input('> {} [{}] '.format(question, default))
+ r = input("> {} [{}] ".format(question, default))
else:
- r = input('> {} '.format(question))
+ r = input("> {} ".format(question))
r = r.strip()
@@ -155,11 +152,10 @@ def ask(question, answer=str, default=None, length=None):
r = int(r)
break
except ValueError:
- print('You must enter an integer')
+ print("You must enter an integer")
return r
else:
- raise NotImplementedError(
- 'Argument `answer` must be str, bool, or integer')
+ raise NotImplementedError("Argument `answer` must be str, bool, or integer")
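
As the dispatch above shows, `ask` keys its behavior off the type object passed as `answer` (str, bool, or int). A usage sketch, assuming an interactive terminal; the question strings here are illustrative, not taken from the script:

    # Renders "> Enable pagination? (Y/n) " and returns a bool:
    with_pagination = ask("Enable pagination?", answer=bool, default=True)
    # Renders "> Default language? [en] " and insists on a 2-character reply:
    lang = ask("Default language?", str, "en", 2)
    # Renders "> Articles per page? [10] " and re-prompts until an int is given:
    per_page = ask("Articles per page?", int, 10)
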
def ask_timezone(question, default, tzurl):
@@ -178,162 +174,227 @@ def ask_timezone(question, default, tzurl):
def render_jinja_template(tmpl_name: str, tmpl_vars: Mapping, target_path: str):
try:
- with open(os.path.join(CONF['basedir'], target_path),
- 'w', encoding='utf-8') as fd:
+ with open(
+ os.path.join(CONF["basedir"], target_path), "w", encoding="utf-8"
+ ) as fd:
_template = _jinja_env.get_template(tmpl_name)
fd.write(_template.render(**tmpl_vars))
except OSError as e:
- print('Error: {}'.format(e))
+ print("Error: {}".format(e))
def main():
parser = argparse.ArgumentParser(
description="A kickstarter for Pelican",
- formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument('-p', '--path', default=_DEFAULT_PATH,
- help="The path to generate the blog into")
- parser.add_argument('-t', '--title', metavar="title",
- help='Set the title of the website')
- parser.add_argument('-a', '--author', metavar="author",
- help='Set the author name of the website')
- parser.add_argument('-l', '--lang', metavar="lang",
- help='Set the default web site language')
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ )
+ parser.add_argument(
+ "-p", "--path", default=_DEFAULT_PATH, help="The path to generate the blog into"
+ )
+ parser.add_argument(
+ "-t", "--title", metavar="title", help="Set the title of the website"
+ )
+ parser.add_argument(
+ "-a", "--author", metavar="author", help="Set the author name of the website"
+ )
+ parser.add_argument(
+ "-l", "--lang", metavar="lang", help="Set the default web site language"
+ )
args = parser.parse_args()
- print('''Welcome to pelican-quickstart v{v}.
+ print(
+ """Welcome to pelican-quickstart v{v}.
This script will help you create a new Pelican-based website.
Please answer the following questions so this script can generate the files
needed by Pelican.
- '''.format(v=__version__))
+ """.format(v=__version__)
+ )
- project = os.path.join(
- os.environ.get('VIRTUAL_ENV', os.curdir), '.project')
- no_path_was_specified = hasattr(args.path, 'is_default_path')
+ project = os.path.join(os.environ.get("VIRTUAL_ENV", os.curdir), ".project")
+ no_path_was_specified = hasattr(args.path, "is_default_path")
if os.path.isfile(project) and no_path_was_specified:
- CONF['basedir'] = open(project).read().rstrip("\n")
- print('Using project associated with current virtual environment. '
- 'Will save to:\n%s\n' % CONF['basedir'])
+ CONF["basedir"] = open(project).read().rstrip("\n")
+ print(
+ "Using project associated with current virtual environment. "
+ "Will save to:\n%s\n" % CONF["basedir"]
+ )
else:
- CONF['basedir'] = os.path.abspath(os.path.expanduser(
- ask('Where do you want to create your new web site?',
- answer=str, default=args.path)))
+ CONF["basedir"] = os.path.abspath(
+ os.path.expanduser(
+ ask(
+ "Where do you want to create your new web site?",
+ answer=str,
+ default=args.path,
+ )
+ )
+ )
- CONF['sitename'] = ask('What will be the title of this web site?',
- answer=str, default=args.title)
- CONF['author'] = ask('Who will be the author of this web site?',
- answer=str, default=args.author)
- CONF['lang'] = ask('What will be the default language of this web site?',
- str, args.lang or CONF['lang'], 2)
+ CONF["sitename"] = ask(
+ "What will be the title of this web site?", answer=str, default=args.title
+ )
+ CONF["author"] = ask(
+ "Who will be the author of this web site?", answer=str, default=args.author
+ )
+ CONF["lang"] = ask(
+ "What will be the default language of this web site?",
+ str,
+ args.lang or CONF["lang"],
+ 2,
+ )
- if ask('Do you want to specify a URL prefix? e.g., https://example.com ',
- answer=bool, default=True):
- CONF['siteurl'] = ask('What is your URL prefix? (see '
- 'above example; no trailing slash)',
- str, CONF['siteurl'])
+ if ask(
+ "Do you want to specify a URL prefix? e.g., https://example.com ",
+ answer=bool,
+ default=True,
+ ):
+ CONF["siteurl"] = ask(
+ "What is your URL prefix? (see " "above example; no trailing slash)",
+ str,
+ CONF["siteurl"],
+ )
- CONF['with_pagination'] = ask('Do you want to enable article pagination?',
- bool, bool(CONF['default_pagination']))
+ CONF["with_pagination"] = ask(
+ "Do you want to enable article pagination?",
+ bool,
+ bool(CONF["default_pagination"]),
+ )
- if CONF['with_pagination']:
- CONF['default_pagination'] = ask('How many articles per page '
- 'do you want?',
- int, CONF['default_pagination'])
+ if CONF["with_pagination"]:
+ CONF["default_pagination"] = ask(
+ "How many articles per page " "do you want?",
+ int,
+ CONF["default_pagination"],
+ )
else:
- CONF['default_pagination'] = False
+ CONF["default_pagination"] = False
- CONF['timezone'] = ask_timezone('What is your time zone?',
- CONF['timezone'], _TZ_URL)
+ CONF["timezone"] = ask_timezone(
+ "What is your time zone?", CONF["timezone"], _TZ_URL
+ )
- automation = ask('Do you want to generate a tasks.py/Makefile '
- 'to automate generation and publishing?', bool, True)
+ automation = ask(
+ "Do you want to generate a tasks.py/Makefile "
+ "to automate generation and publishing?",
+ bool,
+ True,
+ )
if automation:
- if ask('Do you want to upload your website using FTP?',
- answer=bool, default=False):
- CONF['ftp'] = True,
- CONF['ftp_host'] = ask('What is the hostname of your FTP server?',
- str, CONF['ftp_host'])
- CONF['ftp_user'] = ask('What is your username on that server?',
- str, CONF['ftp_user'])
- CONF['ftp_target_dir'] = ask('Where do you want to put your '
- 'web site on that server?',
- str, CONF['ftp_target_dir'])
- if ask('Do you want to upload your website using SSH?',
- answer=bool, default=False):
- CONF['ssh'] = True,
- CONF['ssh_host'] = ask('What is the hostname of your SSH server?',
- str, CONF['ssh_host'])
- CONF['ssh_port'] = ask('What is the port of your SSH server?',
- int, CONF['ssh_port'])
- CONF['ssh_user'] = ask('What is your username on that server?',
- str, CONF['ssh_user'])
- CONF['ssh_target_dir'] = ask('Where do you want to put your '
- 'web site on that server?',
- str, CONF['ssh_target_dir'])
+ if ask(
+ "Do you want to upload your website using FTP?", answer=bool, default=False
+ ):
+ CONF["ftp"] = (True,)
+ CONF["ftp_host"] = ask(
+ "What is the hostname of your FTP server?", str, CONF["ftp_host"]
+ )
+ CONF["ftp_user"] = ask(
+ "What is your username on that server?", str, CONF["ftp_user"]
+ )
+ CONF["ftp_target_dir"] = ask(
+ "Where do you want to put your " "web site on that server?",
+ str,
+ CONF["ftp_target_dir"],
+ )
+ if ask(
+ "Do you want to upload your website using SSH?", answer=bool, default=False
+ ):
+ CONF["ssh"] = (True,)
+ CONF["ssh_host"] = ask(
+ "What is the hostname of your SSH server?", str, CONF["ssh_host"]
+ )
+ CONF["ssh_port"] = ask(
+ "What is the port of your SSH server?", int, CONF["ssh_port"]
+ )
+ CONF["ssh_user"] = ask(
+ "What is your username on that server?", str, CONF["ssh_user"]
+ )
+ CONF["ssh_target_dir"] = ask(
+ "Where do you want to put your " "web site on that server?",
+ str,
+ CONF["ssh_target_dir"],
+ )
- if ask('Do you want to upload your website using Dropbox?',
- answer=bool, default=False):
- CONF['dropbox'] = True,
- CONF['dropbox_dir'] = ask('Where is your Dropbox directory?',
- str, CONF['dropbox_dir'])
+ if ask(
+ "Do you want to upload your website using Dropbox?",
+ answer=bool,
+ default=False,
+ ):
+ CONF["dropbox"] = (True,)
+ CONF["dropbox_dir"] = ask(
+ "Where is your Dropbox directory?", str, CONF["dropbox_dir"]
+ )
- if ask('Do you want to upload your website using S3?',
- answer=bool, default=False):
- CONF['s3'] = True,
- CONF['s3_bucket'] = ask('What is the name of your S3 bucket?',
- str, CONF['s3_bucket'])
+ if ask(
+ "Do you want to upload your website using S3?", answer=bool, default=False
+ ):
+ CONF["s3"] = (True,)
+ CONF["s3_bucket"] = ask(
+ "What is the name of your S3 bucket?", str, CONF["s3_bucket"]
+ )
- if ask('Do you want to upload your website using '
- 'Rackspace Cloud Files?', answer=bool, default=False):
- CONF['cloudfiles'] = True,
- CONF['cloudfiles_username'] = ask('What is your Rackspace '
- 'Cloud username?', str,
- CONF['cloudfiles_username'])
- CONF['cloudfiles_api_key'] = ask('What is your Rackspace '
- 'Cloud API key?', str,
- CONF['cloudfiles_api_key'])
- CONF['cloudfiles_container'] = ask('What is the name of your '
- 'Cloud Files container?',
- str,
- CONF['cloudfiles_container'])
+ if ask(
+ "Do you want to upload your website using " "Rackspace Cloud Files?",
+ answer=bool,
+ default=False,
+ ):
+ CONF["cloudfiles"] = (True,)
+ CONF["cloudfiles_username"] = ask(
+ "What is your Rackspace " "Cloud username?",
+ str,
+ CONF["cloudfiles_username"],
+ )
+ CONF["cloudfiles_api_key"] = ask(
+ "What is your Rackspace " "Cloud API key?",
+ str,
+ CONF["cloudfiles_api_key"],
+ )
+ CONF["cloudfiles_container"] = ask(
+ "What is the name of your " "Cloud Files container?",
+ str,
+ CONF["cloudfiles_container"],
+ )
- if ask('Do you want to upload your website using GitHub Pages?',
- answer=bool, default=False):
- CONF['github'] = True,
- if ask('Is this your personal page (username.github.io)?',
- answer=bool, default=False):
- CONF['github_pages_branch'] = \
- _GITHUB_PAGES_BRANCHES['personal']
+ if ask(
+ "Do you want to upload your website using GitHub Pages?",
+ answer=bool,
+ default=False,
+ ):
+ CONF["github"] = (True,)
+ if ask(
+ "Is this your personal page (username.github.io)?",
+ answer=bool,
+ default=False,
+ ):
+ CONF["github_pages_branch"] = _GITHUB_PAGES_BRANCHES["personal"]
else:
- CONF['github_pages_branch'] = \
- _GITHUB_PAGES_BRANCHES['project']
+ CONF["github_pages_branch"] = _GITHUB_PAGES_BRANCHES["project"]
try:
- os.makedirs(os.path.join(CONF['basedir'], 'content'))
+ os.makedirs(os.path.join(CONF["basedir"], "content"))
except OSError as e:
- print('Error: {}'.format(e))
+ print("Error: {}".format(e))
try:
- os.makedirs(os.path.join(CONF['basedir'], 'output'))
+ os.makedirs(os.path.join(CONF["basedir"], "output"))
except OSError as e:
- print('Error: {}'.format(e))
+ print("Error: {}".format(e))
conf_python = dict()
for key, value in CONF.items():
conf_python[key] = repr(value)
- render_jinja_template('pelicanconf.py.jinja2', conf_python, 'pelicanconf.py')
+ render_jinja_template("pelicanconf.py.jinja2", conf_python, "pelicanconf.py")
- render_jinja_template('publishconf.py.jinja2', CONF, 'publishconf.py')
+ render_jinja_template("publishconf.py.jinja2", CONF, "publishconf.py")
if automation:
- render_jinja_template('tasks.py.jinja2', CONF, 'tasks.py')
- render_jinja_template('Makefile.jinja2', CONF, 'Makefile')
+ render_jinja_template("tasks.py.jinja2", CONF, "tasks.py")
+ render_jinja_template("Makefile.jinja2", CONF, "Makefile")
- print('Done. Your new project is available at %s' % CONF['basedir'])
+ print("Done. Your new project is available at %s" % CONF["basedir"])
if __name__ == "__main__":
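
One thing the reformatting makes visible in this file: assignments such as `CONF['ftp'] = True,` have always carried a trailing comma, so the stored value is the one-element tuple `(True,)`, which ruff now spells out explicitly. Assuming the generated templates only test these keys for truthiness, behavior is unchanged; a standalone sketch:

    CONF = {}
    CONF["ftp"] = (True,)  # what the historical trailing comma actually assigns
    if CONF["ftp"]:  # a one-element tuple is truthy, so this branch still runs
        print("ftp section enabled")
    assert CONF["ftp"] is not True  # ...but the value is a tuple, not the bool
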
diff --git a/pelican/tools/pelican_themes.py b/pelican/tools/pelican_themes.py
index 1ad3a333..4069f99b 100755
--- a/pelican/tools/pelican_themes.py
+++ b/pelican/tools/pelican_themes.py
@@ -8,7 +8,7 @@ import sys
def err(msg, die=None):
"""Print an error message and exits if an exit code is given"""
- sys.stderr.write(msg + '\n')
+ sys.stderr.write(msg + "\n")
if die:
sys.exit(die if isinstance(die, int) else 1)
@@ -16,62 +16,96 @@ def err(msg, die=None):
try:
import pelican
except ImportError:
- err('Cannot import pelican.\nYou must '
- 'install Pelican in order to run this script.',
- -1)
+ err(
+ "Cannot import pelican.\nYou must "
+ "install Pelican in order to run this script.",
+ -1,
+ )
global _THEMES_PATH
_THEMES_PATH = os.path.join(
- os.path.dirname(
- os.path.abspath(pelican.__file__)
- ),
- 'themes'
+ os.path.dirname(os.path.abspath(pelican.__file__)), "themes"
)
-__version__ = '0.2'
-_BUILTIN_THEMES = ['simple', 'notmyidea']
+__version__ = "0.2"
+_BUILTIN_THEMES = ["simple", "notmyidea"]
def main():
"""Main function"""
- parser = argparse.ArgumentParser(
- description="""Install themes for Pelican""")
+ parser = argparse.ArgumentParser(description="""Install themes for Pelican""")
excl = parser.add_mutually_exclusive_group()
excl.add_argument(
- '-l', '--list', dest='action', action="store_const", const='list',
- help="Show the themes already installed and exit")
+ "-l",
+ "--list",
+ dest="action",
+ action="store_const",
+ const="list",
+ help="Show the themes already installed and exit",
+ )
excl.add_argument(
- '-p', '--path', dest='action', action="store_const", const='path',
- help="Show the themes path and exit")
+ "-p",
+ "--path",
+ dest="action",
+ action="store_const",
+ const="path",
+ help="Show the themes path and exit",
+ )
excl.add_argument(
- '-V', '--version', action='version',
- version='pelican-themes v{}'.format(__version__),
- help='Print the version of this script')
+ "-V",
+ "--version",
+ action="version",
+ version="pelican-themes v{}".format(__version__),
+ help="Print the version of this script",
+ )
parser.add_argument(
- '-i', '--install', dest='to_install', nargs='+', metavar="theme path",
- help='The themes to install')
+ "-i",
+ "--install",
+ dest="to_install",
+ nargs="+",
+ metavar="theme path",
+ help="The themes to install",
+ )
parser.add_argument(
- '-r', '--remove', dest='to_remove', nargs='+', metavar="theme name",
- help='The themes to remove')
+ "-r",
+ "--remove",
+ dest="to_remove",
+ nargs="+",
+ metavar="theme name",
+ help="The themes to remove",
+ )
parser.add_argument(
- '-U', '--upgrade', dest='to_upgrade', nargs='+',
- metavar="theme path", help='The themes to upgrade')
+ "-U",
+ "--upgrade",
+ dest="to_upgrade",
+ nargs="+",
+ metavar="theme path",
+ help="The themes to upgrade",
+ )
parser.add_argument(
- '-s', '--symlink', dest='to_symlink', nargs='+', metavar="theme path",
+ "-s",
+ "--symlink",
+ dest="to_symlink",
+ nargs="+",
+ metavar="theme path",
help="Same as `--install', but create a symbolic link instead of "
- "copying the theme. Useful for theme development")
+ "copying the theme. Useful for theme development",
+ )
parser.add_argument(
- '-c', '--clean', dest='clean', action="store_true",
- help="Remove the broken symbolic links of the theme path")
+ "-c",
+ "--clean",
+ dest="clean",
+ action="store_true",
+ help="Remove the broken symbolic links of the theme path",
+ )
parser.add_argument(
- '-v', '--verbose', dest='verbose',
- action="store_true",
- help="Verbose output")
+ "-v", "--verbose", dest="verbose", action="store_true", help="Verbose output"
+ )
args = parser.parse_args()
@@ -79,46 +113,46 @@ def main():
to_sym = args.to_symlink or args.clean
if args.action:
- if args.action == 'list':
+ if args.action == "list":
list_themes(args.verbose)
- elif args.action == 'path':
+ elif args.action == "path":
print(_THEMES_PATH)
elif to_install or args.to_remove or to_sym:
if args.to_remove:
if args.verbose:
- print('Removing themes...')
+ print("Removing themes...")
for i in args.to_remove:
remove(i, v=args.verbose)
if args.to_install:
if args.verbose:
- print('Installing themes...')
+ print("Installing themes...")
for i in args.to_install:
install(i, v=args.verbose)
if args.to_upgrade:
if args.verbose:
- print('Upgrading themes...')
+ print("Upgrading themes...")
for i in args.to_upgrade:
install(i, v=args.verbose, u=True)
if args.to_symlink:
if args.verbose:
- print('Linking themes...')
+ print("Linking themes...")
for i in args.to_symlink:
symlink(i, v=args.verbose)
if args.clean:
if args.verbose:
- print('Cleaning the themes directory...')
+ print("Cleaning the themes directory...")
clean(v=args.verbose)
else:
- print('No argument given... exiting.')
+ print("No argument given... exiting.")
def themes():
@@ -142,7 +176,7 @@ def list_themes(v=False):
if v:
print(theme_path + (" (symbolic link to `" + link_target + "')"))
else:
- print(theme_path + '@')
+ print(theme_path + "@")
else:
print(theme_path)
@@ -150,51 +184,52 @@ def list_themes(v=False):
def remove(theme_name, v=False):
"""Removes a theme"""
- theme_name = theme_name.replace('/', '')
+ theme_name = theme_name.replace("/", "")
target = os.path.join(_THEMES_PATH, theme_name)
if theme_name in _BUILTIN_THEMES:
- err(theme_name + ' is a builtin theme.\n'
- 'You cannot remove a builtin theme with this script, '
- 'remove it by hand if you want.')
+ err(
+ theme_name + " is a builtin theme.\n"
+ "You cannot remove a builtin theme with this script, "
+ "remove it by hand if you want."
+ )
elif os.path.islink(target):
if v:
- print('Removing link `' + target + "'")
+ print("Removing link `" + target + "'")
os.remove(target)
elif os.path.isdir(target):
if v:
- print('Removing directory `' + target + "'")
+ print("Removing directory `" + target + "'")
shutil.rmtree(target)
elif os.path.exists(target):
- err(target + ' : not a valid theme')
+ err(target + " : not a valid theme")
else:
- err(target + ' : no such file or directory')
+ err(target + " : no such file or directory")
def install(path, v=False, u=False):
"""Installs a theme"""
if not os.path.exists(path):
- err(path + ' : no such file or directory')
+ err(path + " : no such file or directory")
elif not os.path.isdir(path):
- err(path + ' : not a directory')
+ err(path + " : not a directory")
else:
theme_name = os.path.basename(os.path.normpath(path))
theme_path = os.path.join(_THEMES_PATH, theme_name)
exists = os.path.exists(theme_path)
if exists and not u:
- err(path + ' : already exists')
+ err(path + " : already exists")
elif exists:
remove(theme_name, v)
install(path, v)
else:
if v:
- print("Copying '{p}' to '{t}' ...".format(p=path,
- t=theme_path))
+ print("Copying '{p}' to '{t}' ...".format(p=path, t=theme_path))
try:
shutil.copytree(path, theme_path)
try:
- if os.name == 'posix':
+ if os.name == "posix":
for root, dirs, files in os.walk(theme_path):
for d in dirs:
dname = os.path.join(root, d)
@@ -203,35 +238,41 @@ def install(path, v=False, u=False):
fname = os.path.join(root, f)
os.chmod(fname, 420) # 0o644
except OSError as e:
- err("Cannot change permissions of files "
- "or directory in `{r}':\n{e}".format(r=theme_path,
- e=str(e)),
- die=False)
+ err(
+ "Cannot change permissions of files "
+ "or directory in `{r}':\n{e}".format(r=theme_path, e=str(e)),
+ die=False,
+ )
except Exception as e:
- err("Cannot copy `{p}' to `{t}':\n{e}".format(
- p=path, t=theme_path, e=str(e)))
+ err(
+ "Cannot copy `{p}' to `{t}':\n{e}".format(
+ p=path, t=theme_path, e=str(e)
+ )
+ )
def symlink(path, v=False):
"""Symbolically link a theme"""
if not os.path.exists(path):
- err(path + ' : no such file or directory')
+ err(path + " : no such file or directory")
elif not os.path.isdir(path):
- err(path + ' : not a directory')
+ err(path + " : not a directory")
else:
theme_name = os.path.basename(os.path.normpath(path))
theme_path = os.path.join(_THEMES_PATH, theme_name)
if os.path.exists(theme_path):
- err(path + ' : already exists')
+ err(path + " : already exists")
else:
if v:
- print("Linking `{p}' to `{t}' ...".format(
- p=path, t=theme_path))
+ print("Linking `{p}' to `{t}' ...".format(p=path, t=theme_path))
try:
os.symlink(path, theme_path)
except Exception as e:
- err("Cannot link `{p}' to `{t}':\n{e}".format(
- p=path, t=theme_path, e=str(e)))
+ err(
+ "Cannot link `{p}' to `{t}':\n{e}".format(
+ p=path, t=theme_path, e=str(e)
+ )
+ )
def is_broken_link(path):
@@ -247,11 +288,11 @@ def clean(v=False):
path = os.path.join(_THEMES_PATH, path)
if os.path.islink(path) and is_broken_link(path):
if v:
- print('Removing {}'.format(path))
+ print("Removing {}".format(path))
try:
os.remove(path)
except OSError:
- print('Error: cannot remove {}'.format(path))
+ print("Error: cannot remove {}".format(path))
else:
c += 1
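
Incidentally, the permission constants in `install()` are written in decimal with octal comments (`os.chmod(fname, 420)  # 0o644`). The equivalence, checked in plain Python:

    import stat

    # 0o644 = 6*64 + 4*8 + 4 = 420, i.e. owner rw-, group r--, other r--
    assert 0o644 == 420
    assert stat.filemode(0o100644) == "-rw-r--r--"  # regular file, mode 644
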
diff --git a/pelican/urlwrappers.py b/pelican/urlwrappers.py
index e00b914c..2e8cc953 100644
--- a/pelican/urlwrappers.py
+++ b/pelican/urlwrappers.py
@@ -31,17 +31,16 @@ class URLWrapper:
@property
def slug(self):
if self._slug is None:
- class_key = '{}_REGEX_SUBSTITUTIONS'.format(
- self.__class__.__name__.upper())
+ class_key = "{}_REGEX_SUBSTITUTIONS".format(self.__class__.__name__.upper())
regex_subs = self.settings.get(
- class_key,
- self.settings.get('SLUG_REGEX_SUBSTITUTIONS', []))
- preserve_case = self.settings.get('SLUGIFY_PRESERVE_CASE', False)
+ class_key, self.settings.get("SLUG_REGEX_SUBSTITUTIONS", [])
+ )
+ preserve_case = self.settings.get("SLUGIFY_PRESERVE_CASE", False)
self._slug = slugify(
self.name,
regex_subs=regex_subs,
preserve_case=preserve_case,
- use_unicode=self.settings.get('SLUGIFY_USE_UNICODE', False)
+ use_unicode=self.settings.get("SLUGIFY_USE_UNICODE", False),
)
return self._slug
@@ -53,26 +52,26 @@ class URLWrapper:
def as_dict(self):
d = self.__dict__
- d['name'] = self.name
- d['slug'] = self.slug
+ d["name"] = self.name
+ d["slug"] = self.slug
return d
def __hash__(self):
return hash(self.slug)
def _normalize_key(self, key):
- class_key = '{}_REGEX_SUBSTITUTIONS'.format(
- self.__class__.__name__.upper())
+ class_key = "{}_REGEX_SUBSTITUTIONS".format(self.__class__.__name__.upper())
regex_subs = self.settings.get(
- class_key,
- self.settings.get('SLUG_REGEX_SUBSTITUTIONS', []))
- use_unicode = self.settings.get('SLUGIFY_USE_UNICODE', False)
- preserve_case = self.settings.get('SLUGIFY_PRESERVE_CASE', False)
+ class_key, self.settings.get("SLUG_REGEX_SUBSTITUTIONS", [])
+ )
+ use_unicode = self.settings.get("SLUGIFY_USE_UNICODE", False)
+ preserve_case = self.settings.get("SLUGIFY_PRESERVE_CASE", False)
return slugify(
key,
regex_subs=regex_subs,
preserve_case=preserve_case,
- use_unicode=use_unicode)
+ use_unicode=use_unicode,
+ )
def __eq__(self, other):
if isinstance(other, self.__class__):
@@ -99,7 +98,7 @@ class URLWrapper:
return self.name
def __repr__(self):
- return '<{} {}>'.format(type(self).__name__, repr(self._name))
+ return "<{} {}>".format(type(self).__name__, repr(self._name))
def _from_settings(self, key, get_page_name=False):
"""Returns URL information as defined in settings.
@@ -114,7 +113,7 @@ class URLWrapper:
if isinstance(value, pathlib.Path):
value = str(value)
if not isinstance(value, str):
- logger.warning('%s is set to %s', setting, value)
+ logger.warning("%s is set to %s", setting, value)
return value
else:
if get_page_name:
@@ -122,10 +121,11 @@ class URLWrapper:
else:
return value.format(**self.as_dict())
- page_name = property(functools.partial(_from_settings, key='URL',
- get_page_name=True))
- url = property(functools.partial(_from_settings, key='URL'))
- save_as = property(functools.partial(_from_settings, key='SAVE_AS'))
+ page_name = property(
+ functools.partial(_from_settings, key="URL", get_page_name=True)
+ )
+ url = property(functools.partial(_from_settings, key="URL"))
+ save_as = property(functools.partial(_from_settings, key="SAVE_AS"))
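
The `page_name`/`url`/`save_as` trio relies on `property` accepting any callable of the instance, including a `functools.partial` that pins the keyword arguments. The same technique in isolation (a hypothetical minimal class, not Pelican's):

    import functools

    class Wrapper:
        def __init__(self, settings):
            self.settings = settings

        def _from_settings(self, key):
            # stand-in for the real lookup/formatting logic
            return self.settings[key]

        url = property(functools.partial(_from_settings, key="URL"))
        save_as = property(functools.partial(_from_settings, key="SAVE_AS"))

    w = Wrapper({"URL": "{slug}.html", "SAVE_AS": "{slug}/index.html"})
    assert w.url == "{slug}.html"
    assert w.save_as == "{slug}/index.html"
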
class Category(URLWrapper):
diff --git a/pelican/utils.py b/pelican/utils.py
index 09ffcfe6..08a08f7e 100644
--- a/pelican/utils.py
+++ b/pelican/utils.py
@@ -32,38 +32,37 @@ logger = logging.getLogger(__name__)
def sanitised_join(base_directory, *parts):
- joined = posixize_path(
- os.path.abspath(os.path.join(base_directory, *parts)))
+ joined = posixize_path(os.path.abspath(os.path.join(base_directory, *parts)))
base = posixize_path(os.path.abspath(base_directory))
if not joined.startswith(base):
raise RuntimeError(
- "Attempted to break out of output directory to {}".format(
- joined
- )
+ "Attempted to break out of output directory to {}".format(joined)
)
return joined
def strftime(date, date_format):
- '''
+ """
Enhanced replacement for built-in strftime with zero stripping
This works by 'grabbing' possible format strings (those starting with %),
formatting them with the date, stripping any leading zeros if - prefix is
used and replacing formatted output back.
- '''
+ """
+
def strip_zeros(x):
- return x.lstrip('0') or '0'
+ return x.lstrip("0") or "0"
+
# includes ISO date parameters added by Python 3.6
- c89_directives = 'aAbBcdfGHIjmMpSUuVwWxXyYzZ%'
+ c89_directives = "aAbBcdfGHIjmMpSUuVwWxXyYzZ%"
# grab candidate format options
- format_options = '%[-]?.'
+ format_options = "%[-]?."
candidates = re.findall(format_options, date_format)
# replace candidates with placeholders for later % formatting
- template = re.sub(format_options, '%s', date_format)
+ template = re.sub(format_options, "%s", date_format)
formatted_candidates = []
for candidate in candidates:
@@ -72,7 +71,7 @@ def strftime(date, date_format):
# check for '-' prefix
if len(candidate) == 3:
# '-' prefix
- candidate = '%{}'.format(candidate[-1])
+ candidate = "%{}".format(candidate[-1])
conversion = strip_zeros
else:
conversion = None
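
The candidate/placeholder machinery above is what lets Pelican honor GNU-style `%-` prefixes that the built-in `datetime.strftime` does not support portably. Expected behavior, sketched against the full function (whose tail lies outside this hunk):

    from pelican.utils import SafeDatetime

    d = SafeDatetime(2012, 3, 2)
    assert d.strftime("%d/%m/%Y") == "02/03/2012"  # zero-padded, as usual
    assert d.strftime("%-d/%-m/%Y") == "2/3/2012"  # '-' prefix strips the zeros
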
@@ -95,10 +94,10 @@ def strftime(date, date_format):
class SafeDatetime(datetime.datetime):
- '''Subclass of datetime that works with utf-8 format strings on PY2'''
+ """Subclass of datetime that works with utf-8 format strings on PY2"""
def strftime(self, fmt, safe=True):
- '''Uses our custom strftime if supposed to be *safe*'''
+ """Uses our custom strftime if supposed to be *safe*"""
if safe:
return strftime(self, fmt)
else:
@@ -106,22 +105,21 @@ class SafeDatetime(datetime.datetime):
class DateFormatter:
- '''A date formatter object used as a jinja filter
+ """A date formatter object used as a jinja filter
Uses the `strftime` implementation and makes sure jinja uses the locale
defined in LOCALE setting
- '''
+ """
def __init__(self):
self.locale = locale.setlocale(locale.LC_TIME)
def __call__(self, date, date_format):
-
# on OSX, encoding from LC_CTYPE determines the unicode output in PY3
# make sure it's same as LC_TIME
- with temporary_locale(self.locale, locale.LC_TIME), \
- temporary_locale(self.locale, locale.LC_CTYPE):
-
+ with temporary_locale(self.locale, locale.LC_TIME), temporary_locale(
+ self.locale, locale.LC_CTYPE
+ ):
formatted = strftime(date, date_format)
return formatted
@@ -155,7 +153,7 @@ class memoized:
return self.func.__doc__
def __get__(self, obj, objtype):
- '''Support instance methods.'''
+ """Support instance methods."""
fn = partial(self.__call__, obj)
fn.cache = self.cache
return fn
@@ -177,17 +175,16 @@ def deprecated_attribute(old, new, since=None, remove=None, doc=None):
Note that the decorator needs a dummy method to attach to, but the
content of the dummy method is ignored.
"""
+
def _warn():
- version = '.'.join(str(x) for x in since)
- message = ['{} has been deprecated since {}'.format(old, version)]
+ version = ".".join(str(x) for x in since)
+ message = ["{} has been deprecated since {}".format(old, version)]
if remove:
- version = '.'.join(str(x) for x in remove)
- message.append(
- ' and will be removed by version {}'.format(version))
- message.append('. Use {} instead.'.format(new))
- logger.warning(''.join(message))
- logger.debug(''.join(str(x) for x
- in traceback.format_stack()))
+ version = ".".join(str(x) for x in remove)
+ message.append(" and will be removed by version {}".format(version))
+ message.append(". Use {} instead.".format(new))
+ logger.warning("".join(message))
+ logger.debug("".join(str(x) for x in traceback.format_stack()))
def fget(self):
_warn()
@@ -208,21 +205,20 @@ def get_date(string):
If no format matches the given date, raise a ValueError.
"""
- string = re.sub(' +', ' ', string)
- default = SafeDatetime.now().replace(hour=0, minute=0,
- second=0, microsecond=0)
+ string = re.sub(" +", " ", string)
+ default = SafeDatetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
try:
return dateutil.parser.parse(string, default=default)
except (TypeError, ValueError):
- raise ValueError('{!r} is not a valid date'.format(string))
+ raise ValueError("{!r} is not a valid date".format(string))
@contextmanager
-def pelican_open(filename, mode='r', strip_crs=(sys.platform == 'win32')):
+def pelican_open(filename, mode="r", strip_crs=(sys.platform == "win32")):
"""Open a file and return its content"""
# utf-8-sig will clear any BOM if present
- with open(filename, mode, encoding='utf-8-sig') as infile:
+ with open(filename, mode, encoding="utf-8-sig") as infile:
content = infile.read()
yield content
@@ -244,7 +240,7 @@ def slugify(value, regex_subs=(), preserve_case=False, use_unicode=False):
def normalize_unicode(text):
# normalize text by compatibility composition
# see: https://en.wikipedia.org/wiki/Unicode_equivalence
- return unicodedata.normalize('NFKC', text)
+ return unicodedata.normalize("NFKC", text)
# strip tags from value
value = Markup(value).striptags()
@@ -259,10 +255,8 @@ def slugify(value, regex_subs=(), preserve_case=False, use_unicode=False):
# perform regex substitutions
for src, dst in regex_subs:
value = re.sub(
- normalize_unicode(src),
- normalize_unicode(dst),
- value,
- flags=re.IGNORECASE)
+ normalize_unicode(src), normalize_unicode(dst), value, flags=re.IGNORECASE
+ )
if not preserve_case:
value = value.lower()
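
After NFKC-normalizing both the text and the patterns, `slugify` applies each `(pattern, replacement)` pair case-insensitively, then lowercases unless `preserve_case` is set. The substitution step in isolation (standalone sketch; the two substitutions are simplified stand-ins, not Pelican's exact defaults):

    import re
    import unicodedata

    value = unicodedata.normalize("NFKC", "Hello,  World!")
    for src, dst in [(r"[^\w\s-]", ""), (r"[-\s]+", "-")]:
        value = re.sub(src, dst, value, flags=re.IGNORECASE)
    print(value.lower())  # hello-world
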
@@ -283,8 +277,7 @@ def copy(source, destination, ignores=None):
"""
def walk_error(err):
- logger.warning("While copying %s: %s: %s",
- source_, err.filename, err.strerror)
+ logger.warning("While copying %s: %s: %s", source_, err.filename, err.strerror)
source_ = os.path.abspath(os.path.expanduser(source))
destination_ = os.path.abspath(os.path.expanduser(destination))
@@ -292,39 +285,40 @@ def copy(source, destination, ignores=None):
if ignores is None:
ignores = []
- if any(fnmatch.fnmatch(os.path.basename(source), ignore)
- for ignore in ignores):
- logger.info('Not copying %s due to ignores', source_)
+ if any(fnmatch.fnmatch(os.path.basename(source), ignore) for ignore in ignores):
+ logger.info("Not copying %s due to ignores", source_)
return
if os.path.isfile(source_):
dst_dir = os.path.dirname(destination_)
if not os.path.exists(dst_dir):
- logger.info('Creating directory %s', dst_dir)
+ logger.info("Creating directory %s", dst_dir)
os.makedirs(dst_dir)
- logger.info('Copying %s to %s', source_, destination_)
+ logger.info("Copying %s to %s", source_, destination_)
copy_file(source_, destination_)
elif os.path.isdir(source_):
if not os.path.exists(destination_):
- logger.info('Creating directory %s', destination_)
+ logger.info("Creating directory %s", destination_)
os.makedirs(destination_)
if not os.path.isdir(destination_):
- logger.warning('Cannot copy %s (a directory) to %s (a file)',
- source_, destination_)
+ logger.warning(
+ "Cannot copy %s (a directory) to %s (a file)", source_, destination_
+ )
return
for src_dir, subdirs, others in os.walk(source_, followlinks=True):
- dst_dir = os.path.join(destination_,
- os.path.relpath(src_dir, source_))
+ dst_dir = os.path.join(destination_, os.path.relpath(src_dir, source_))
- subdirs[:] = (s for s in subdirs if not any(fnmatch.fnmatch(s, i)
- for i in ignores))
- others[:] = (o for o in others if not any(fnmatch.fnmatch(o, i)
- for i in ignores))
+ subdirs[:] = (
+ s for s in subdirs if not any(fnmatch.fnmatch(s, i) for i in ignores)
+ )
+ others[:] = (
+ o for o in others if not any(fnmatch.fnmatch(o, i) for i in ignores)
+ )
if not os.path.isdir(dst_dir):
- logger.info('Creating directory %s', dst_dir)
+ logger.info("Creating directory %s", dst_dir)
# Parent directories are known to exist, so 'mkdir' suffices.
os.mkdir(dst_dir)
@@ -332,21 +326,24 @@ def copy(source, destination, ignores=None):
src_path = os.path.join(src_dir, o)
dst_path = os.path.join(dst_dir, o)
if os.path.isfile(src_path):
- logger.info('Copying %s to %s', src_path, dst_path)
+ logger.info("Copying %s to %s", src_path, dst_path)
copy_file(src_path, dst_path)
else:
- logger.warning('Skipped copy %s (not a file or '
- 'directory) to %s',
- src_path, dst_path)
+ logger.warning(
+ "Skipped copy %s (not a file or " "directory) to %s",
+ src_path,
+ dst_path,
+ )
def copy_file(source, destination):
- '''Copy a file'''
+ """Copy a file"""
try:
shutil.copyfile(source, destination)
except OSError as e:
- logger.warning("A problem occurred copying file %s to %s; %s",
- source, destination, e)
+ logger.warning(
+ "A problem occurred copying file %s to %s; %s", source, destination, e
+ )
def clean_output_dir(path, retention):
@@ -367,15 +364,15 @@ def clean_output_dir(path, retention):
for filename in os.listdir(path):
file = os.path.join(path, filename)
if any(filename == retain for retain in retention):
- logger.debug("Skipping deletion; %s is on retention list: %s",
- filename, file)
+ logger.debug(
+ "Skipping deletion; %s is on retention list: %s", filename, file
+ )
elif os.path.isdir(file):
try:
shutil.rmtree(file)
logger.debug("Deleted directory %s", file)
except Exception as e:
- logger.error("Unable to delete directory %s; %s",
- file, e)
+ logger.error("Unable to delete directory %s; %s", file, e)
elif os.path.isfile(file) or os.path.islink(file):
try:
os.remove(file)
@@ -407,29 +404,31 @@ def posixize_path(rel_path):
"""Use '/' as path separator, so that source references,
like '{static}/foo/bar.jpg' or 'extras/favicon.ico',
will work on Windows as well as on Mac and Linux."""
- return rel_path.replace(os.sep, '/')
+ return rel_path.replace(os.sep, "/")
class _HTMLWordTruncator(HTMLParser):
-
- _word_regex = re.compile(r"{DBC}|(\w[\w'-]*)".format(
- # DBC means CJK-like characters. An character can stand for a word.
- DBC=("([\u4E00-\u9FFF])|" # CJK Unified Ideographs
- "([\u3400-\u4DBF])|" # CJK Unified Ideographs Extension A
- "([\uF900-\uFAFF])|" # CJK Compatibility Ideographs
- "([\U00020000-\U0002A6DF])|" # CJK Unified Ideographs Extension B
- "([\U0002F800-\U0002FA1F])|" # CJK Compatibility Ideographs Supplement
- "([\u3040-\u30FF])|" # Hiragana and Katakana
- "([\u1100-\u11FF])|" # Hangul Jamo
- "([\uAC00-\uD7FF])|" # Hangul Compatibility Jamo
- "([\u3130-\u318F])" # Hangul Syllables
- )), re.UNICODE)
- _word_prefix_regex = re.compile(r'\w', re.U)
- _singlets = ('br', 'col', 'link', 'base', 'img', 'param', 'area',
- 'hr', 'input')
+ _word_regex = re.compile(
+ r"{DBC}|(\w[\w'-]*)".format(
+ # DBC means CJK-like characters. A character can stand for a word.
+ DBC=(
+ "([\u4E00-\u9FFF])|" # CJK Unified Ideographs
+ "([\u3400-\u4DBF])|" # CJK Unified Ideographs Extension A
+ "([\uF900-\uFAFF])|" # CJK Compatibility Ideographs
+ "([\U00020000-\U0002A6DF])|" # CJK Unified Ideographs Extension B
+ "([\U0002F800-\U0002FA1F])|" # CJK Compatibility Ideographs Supplement
+ "([\u3040-\u30FF])|" # Hiragana and Katakana
+ "([\u1100-\u11FF])|" # Hangul Jamo
+ "([\uAC00-\uD7FF])|" # Hangul Compatibility Jamo
+ "([\u3130-\u318F])" # Hangul Syllables
+ )
+ ),
+ re.UNICODE,
+ )
+ _word_prefix_regex = re.compile(r"\w", re.U)
+ _singlets = ("br", "col", "link", "base", "img", "param", "area", "hr", "input")
class TruncationCompleted(Exception):
-
def __init__(self, truncate_at):
super().__init__(truncate_at)
self.truncate_at = truncate_at
@@ -455,7 +454,7 @@ class _HTMLWordTruncator(HTMLParser):
line_start = 0
lineno, line_offset = self.getpos()
for i in range(lineno - 1):
- line_start = self.rawdata.index('\n', line_start) + 1
+ line_start = self.rawdata.index("\n", line_start) + 1
return line_start + line_offset
def add_word(self, word_end):
@@ -482,7 +481,7 @@ class _HTMLWordTruncator(HTMLParser):
else:
# SGML: An end tag closes, back to the matching start tag,
# all unclosed intervening start tags with omitted end tags
- del self.open_tags[:i + 1]
+ del self.open_tags[: i + 1]
def handle_data(self, data):
word_end = 0
@@ -531,7 +530,7 @@ class _HTMLWordTruncator(HTMLParser):
ref_end = offset + len(name) + 1
try:
- if self.rawdata[ref_end] == ';':
+ if self.rawdata[ref_end] == ";":
ref_end += 1
except IndexError:
# We are at the end of the string and there's no ';'
@@ -556,7 +555,7 @@ class _HTMLWordTruncator(HTMLParser):
codepoint = entities.name2codepoint[name]
char = chr(codepoint)
except KeyError:
- char = ''
+ char = ""
self._handle_ref(name, char)
def handle_charref(self, name):
@@ -567,17 +566,17 @@ class _HTMLWordTruncator(HTMLParser):
`#x2014`)
"""
try:
- if name.startswith('x'):
+ if name.startswith("x"):
codepoint = int(name[1:], 16)
else:
codepoint = int(name)
char = chr(codepoint)
except (ValueError, OverflowError):
- char = ''
- self._handle_ref('#' + name, char)
+ char = ""
+ self._handle_ref("#" + name, char)
-def truncate_html_words(s, num, end_text='…'):
+def truncate_html_words(s, num, end_text="…"):
"""Truncates HTML to a certain number of words.
(not counting tags and comments). Closes opened tags if they were correctly
@@ -588,23 +587,23 @@ def truncate_html_words(s, num, end_text='…'):
"""
length = int(num)
if length <= 0:
- return ''
+ return ""
truncator = _HTMLWordTruncator(length)
truncator.feed(s)
if truncator.truncate_at is None:
return s
- out = s[:truncator.truncate_at]
+ out = s[: truncator.truncate_at]
if end_text:
- out += ' ' + end_text
+ out += " " + end_text
# Close any tags still open
for tag in truncator.open_tags:
- out += '</%s>' % tag
+ out += "</%s>" % tag
# Return string
return out
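
Putting the truncator together: words are counted per the regex above (each CJK character counts as one), the HTML is cut at the last counted word, the end text is appended, and still-open tags are closed in most-recent-first order. A sketch of the expected result, assuming the module as a whole:

    from pelican.utils import truncate_html_words

    html = "<p><em>Hello brave new</em> world of static sites</p>"
    print(truncate_html_words(html, 3))
    # expected: <p><em>Hello brave new …</em></p>
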
def process_translations(content_list, translation_id=None):
- """ Finds translations and returns them.
+ """Finds translations and returns them.
For each content_list item, populates the 'translations' attribute, and
returns a tuple with two lists (index, translations). Index list includes
@@ -632,19 +631,23 @@ def process_translations(content_list, translation_id=None):
try:
content_list.sort(key=attrgetter(*translation_id))
except TypeError:
- raise TypeError('Cannot unpack {}, \'translation_id\' must be falsy, a'
- ' string or a collection of strings'
- .format(translation_id))
+ raise TypeError(
+ "Cannot unpack {}, 'translation_id' must be falsy, a"
+ " string or a collection of strings".format(translation_id)
+ )
except AttributeError:
- raise AttributeError('Cannot use {} as \'translation_id\', there '
- 'appear to be items without these metadata '
- 'attributes'.format(translation_id))
+ raise AttributeError(
+ "Cannot use {} as 'translation_id', there "
+ "appear to be items without these metadata "
+ "attributes".format(translation_id)
+ )
for id_vals, items in groupby(content_list, attrgetter(*translation_id)):
# prepare warning string
id_vals = (id_vals,) if len(translation_id) == 1 else id_vals
- with_str = 'with' + ', '.join([' {} "{{}}"'] * len(translation_id))\
- .format(*translation_id).format(*id_vals)
+ with_str = "with" + ", ".join([' {} "{{}}"'] * len(translation_id)).format(
+ *translation_id
+ ).format(*id_vals)
items = list(items)
original_items = get_original_items(items, with_str)
@@ -662,24 +665,24 @@ def get_original_items(items, with_str):
args = [len(items)]
args.extend(extra)
args.extend(x.source_path for x in items)
- logger.warning('{}: {}'.format(msg, '\n%s' * len(items)), *args)
+ logger.warning("{}: {}".format(msg, "\n%s" * len(items)), *args)
# warn if several items have the same lang
- for lang, lang_items in groupby(items, attrgetter('lang')):
+ for lang, lang_items in groupby(items, attrgetter("lang")):
lang_items = list(lang_items)
if len(lang_items) > 1:
- _warn_source_paths('There are %s items "%s" with lang %s',
- lang_items, with_str, lang)
+ _warn_source_paths(
+ 'There are %s items "%s" with lang %s', lang_items, with_str, lang
+ )
# items with `translation` metadata will be used as translations...
candidate_items = [
- i for i in items
- if i.metadata.get('translation', 'false').lower() == 'false']
+ i for i in items if i.metadata.get("translation", "false").lower() == "false"
+ ]
# ...unless all items with that slug are translations
if not candidate_items:
- _warn_source_paths('All items ("%s") "%s" are translations',
- items, with_str)
+ _warn_source_paths('All items ("%s") "%s" are translations', items, with_str)
candidate_items = items
# find items with default language
@@ -691,13 +694,14 @@ def get_original_items(items, with_str):
# warn if there are several original items
if len(original_items) > 1:
- _warn_source_paths('There are %s original (not translated) items %s',
- original_items, with_str)
+ _warn_source_paths(
+ "There are %s original (not translated) items %s", original_items, with_str
+ )
return original_items
-def order_content(content_list, order_by='slug'):
- """ Sorts content.
+def order_content(content_list, order_by="slug"):
+ """Sorts content.
order_by can be a string of an attribute or sorting function. If order_by
is defined, content will be ordered by that attribute or sorting function.
@@ -713,22 +717,22 @@ def order_content(content_list, order_by='slug'):
try:
content_list.sort(key=order_by)
except Exception:
- logger.error('Error sorting with function %s', order_by)
+ logger.error("Error sorting with function %s", order_by)
elif isinstance(order_by, str):
- if order_by.startswith('reversed-'):
+ if order_by.startswith("reversed-"):
order_reversed = True
- order_by = order_by.replace('reversed-', '', 1)
+ order_by = order_by.replace("reversed-", "", 1)
else:
order_reversed = False
- if order_by == 'basename':
+ if order_by == "basename":
content_list.sort(
- key=lambda x: os.path.basename(x.source_path or ''),
- reverse=order_reversed)
+ key=lambda x: os.path.basename(x.source_path or ""),
+ reverse=order_reversed,
+ )
else:
try:
- content_list.sort(key=attrgetter(order_by),
- reverse=order_reversed)
+ content_list.sort(key=attrgetter(order_by), reverse=order_reversed)
except AttributeError:
for content in content_list:
try:
@@ -736,26 +740,31 @@ def order_content(content_list, order_by='slug'):
except AttributeError:
logger.warning(
'There is no "%s" attribute in "%s". '
- 'Defaulting to slug order.',
+ "Defaulting to slug order.",
order_by,
content.get_relative_source_path(),
extra={
- 'limit_msg': ('More files are missing '
- 'the needed attribute.')
- })
+ "limit_msg": (
+ "More files are missing "
+ "the needed attribute."
+ )
+ },
+ )
else:
logger.warning(
- 'Invalid *_ORDER_BY setting (%s). '
- 'Valid options are strings and functions.', order_by)
+ "Invalid *_ORDER_BY setting (%s). "
+ "Valid options are strings and functions.",
+ order_by,
+ )
return content_list
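
`order_content` thus accepts three spellings: a key function (handled in the branch above the string case), an attribute name with an optional `reversed-` prefix, or the special string `basename`. A sketch with hypothetical items carrying only a `slug` attribute:

    from types import SimpleNamespace
    from pelican.utils import order_content

    items = [SimpleNamespace(slug="b"), SimpleNamespace(slug="a")]
    order_content(items, order_by="slug")            # sorts to slugs a, b
    order_content(items, order_by="reversed-slug")   # sorts to slugs b, a
    order_content(items, order_by=lambda c: c.slug)  # key-function form
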
def wait_for_changes(settings_file, reader_class, settings):
- content_path = settings.get('PATH', '')
- theme_path = settings.get('THEME', '')
+ content_path = settings.get("PATH", "")
+ theme_path = settings.get("THEME", "")
ignore_files = set(
- fnmatch.translate(pattern) for pattern in settings.get('IGNORE_FILES', [])
+ fnmatch.translate(pattern) for pattern in settings.get("IGNORE_FILES", [])
)
candidate_paths = [
@@ -765,7 +774,7 @@ def wait_for_changes(settings_file, reader_class, settings):
]
candidate_paths.extend(
- os.path.join(content_path, path) for path in settings.get('STATIC_PATHS', [])
+ os.path.join(content_path, path) for path in settings.get("STATIC_PATHS", [])
)
watching_paths = []
@@ -778,11 +787,13 @@ def wait_for_changes(settings_file, reader_class, settings):
else:
watching_paths.append(path)
- return next(watchfiles.watch(
- *watching_paths,
- watch_filter=watchfiles.DefaultFilter(ignore_entity_patterns=ignore_files),
- rust_timeout=0
- ))
+ return next(
+ watchfiles.watch(
+ *watching_paths,
+ watch_filter=watchfiles.DefaultFilter(ignore_entity_patterns=ignore_files),
+ rust_timeout=0,
+ )
+ )
def set_date_tzinfo(d, tz_name=None):
@@ -811,7 +822,7 @@ def split_all(path):
"""
if isinstance(path, str):
components = []
- path = path.lstrip('/')
+ path = path.lstrip("/")
while path:
head, tail = os.path.split(path)
if tail:
@@ -827,32 +838,30 @@ def split_all(path):
return None
else:
raise TypeError(
- '"path" was {}, must be string, None, or pathlib.Path'.format(
- type(path)
- )
+ '"path" was {}, must be string, None, or pathlib.Path'.format(type(path))
)
def is_selected_for_writing(settings, path):
- '''Check whether path is selected for writing
+ """Check whether path is selected for writing
according to the WRITE_SELECTED list
If WRITE_SELECTED is an empty list (default),
any path is selected for writing.
- '''
- if settings['WRITE_SELECTED']:
- return path in settings['WRITE_SELECTED']
+ """
+ if settings["WRITE_SELECTED"]:
+ return path in settings["WRITE_SELECTED"]
else:
return True
def path_to_file_url(path):
- '''Convert file-system path to file:// URL'''
+ """Convert file-system path to file:// URL"""
return urllib.parse.urljoin("file://", urllib.request.pathname2url(path))
def maybe_pluralize(count, singular, plural):
- '''
+ """
Returns a formatted string containing count and plural if count is not 1
Returns count and singular if count is 1
@@ -860,22 +869,22 @@ def maybe_pluralize(count, singular, plural):
maybe_pluralize(1, 'Article', 'Articles') -> '1 Article'
maybe_pluralize(2, 'Article', 'Articles') -> '2 Articles'
- '''
+ """
selection = plural
if count == 1:
selection = singular
- return '{} {}'.format(count, selection)
+ return "{} {}".format(count, selection)
@contextmanager
def temporary_locale(temp_locale=None, lc_category=locale.LC_ALL):
- '''
+ """
Enable code to run in a context with a temporary locale
Resets the locale back when exiting context.
Use tests.support.TestCaseWithCLocale if you want every unit test in a
class to use the C locale.
- '''
+ """
orig_locale = locale.setlocale(lc_category)
if temp_locale:
locale.setlocale(lc_category, temp_locale)
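
`temporary_locale` is consumed as a context manager (see `DateFormatter` earlier in this file); per its docstring, the part of the body outside this hunk resets the locale on exit, which is what makes stacking two categories safe. A usage sketch under that assumption:

    import locale

    from pelican.utils import temporary_locale

    with temporary_locale("C", locale.LC_TIME):
        pass  # date formatting here runs under the C locale
    # the previous LC_TIME setting is restored on exit
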
diff --git a/pelican/writers.py b/pelican/writers.py
index 632c6b87..ec12d125 100644
--- a/pelican/writers.py
+++ b/pelican/writers.py
@@ -9,14 +9,18 @@ from markupsafe import Markup
from pelican.paginator import Paginator
from pelican.plugins import signals
-from pelican.utils import (get_relative_path, is_selected_for_writing,
- path_to_url, sanitised_join, set_date_tzinfo)
+from pelican.utils import (
+ get_relative_path,
+ is_selected_for_writing,
+ path_to_url,
+ sanitised_join,
+ set_date_tzinfo,
+)
logger = logging.getLogger(__name__)
class Writer:
-
def __init__(self, output_path, settings=None):
self.output_path = output_path
self.reminder = dict()
@@ -25,24 +29,26 @@ class Writer:
self._overridden_files = set()
# See Content._link_replacer for details
- if "RELATIVE_URLS" in self.settings and self.settings['RELATIVE_URLS']:
+ if "RELATIVE_URLS" in self.settings and self.settings["RELATIVE_URLS"]:
self.urljoiner = posix_join
else:
self.urljoiner = lambda base, url: urljoin(
- base if base.endswith('/') else base + '/', str(url))
+ base if base.endswith("/") else base + "/", str(url)
+ )
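
The base normalization in `urljoiner` matters because `urljoin` treats a base without a trailing slash as a file and drops its last path segment. In plain Python:

    from urllib.parse import urljoin

    base = "https://example.com/blog"
    assert urljoin(base, "feed.xml") == "https://example.com/feed.xml"
    assert urljoin(base + "/", "feed.xml") == "https://example.com/blog/feed.xml"
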
def _create_new_feed(self, feed_type, feed_title, context):
- feed_class = Rss201rev2Feed if feed_type == 'rss' else Atom1Feed
+ feed_class = Rss201rev2Feed if feed_type == "rss" else Atom1Feed
if feed_title:
- feed_title = context['SITENAME'] + ' - ' + feed_title
+ feed_title = context["SITENAME"] + " - " + feed_title
else:
- feed_title = context['SITENAME']
+ feed_title = context["SITENAME"]
return feed_class(
title=Markup(feed_title).striptags(),
- link=(self.site_url + '/'),
+ link=(self.site_url + "/"),
feed_url=self.feed_url,
- description=context.get('SITESUBTITLE', ''),
- subtitle=context.get('SITESUBTITLE', None))
+ description=context.get("SITESUBTITLE", ""),
+ subtitle=context.get("SITESUBTITLE", None),
+ )
def _add_item_to_the_feed(self, feed, item):
title = Markup(item.title).striptags()
@@ -52,7 +58,7 @@ class Writer:
# RSS feeds use a single tag called 'description' for both the full
# content and the summary
content = None
- if self.settings.get('RSS_FEED_SUMMARY_ONLY'):
+ if self.settings.get("RSS_FEED_SUMMARY_ONLY"):
description = item.summary
else:
description = item.get_content(self.site_url)
@@ -71,9 +77,9 @@ class Writer:
description = None
categories = []
- if hasattr(item, 'category'):
+ if hasattr(item, "category"):
categories.append(item.category)
- if hasattr(item, 'tags'):
+ if hasattr(item, "tags"):
categories.extend(item.tags)
feed.add_item(
@@ -83,14 +89,12 @@ class Writer:
description=description,
content=content,
categories=categories or None,
- author_name=getattr(item, 'author', ''),
- pubdate=set_date_tzinfo(
- item.date, self.settings.get('TIMEZONE', None)
- ),
+ author_name=getattr(item, "author", ""),
+ pubdate=set_date_tzinfo(item.date, self.settings.get("TIMEZONE", None)),
updateddate=set_date_tzinfo(
- item.modified, self.settings.get('TIMEZONE', None)
+ item.modified, self.settings.get("TIMEZONE", None)
)
- if hasattr(item, 'modified')
+ if hasattr(item, "modified")
else None,
)
@@ -102,22 +106,29 @@ class Writer:
"""
if filename in self._overridden_files:
if override:
- raise RuntimeError('File %s is set to be overridden twice'
- % filename)
- logger.info('Skipping %s', filename)
+ raise RuntimeError("File %s is set to be overridden twice" % filename)
+ logger.info("Skipping %s", filename)
filename = os.devnull
elif filename in self._written_files:
if override:
- logger.info('Overwriting %s', filename)
+ logger.info("Overwriting %s", filename)
else:
- raise RuntimeError('File %s is to be overwritten' % filename)
+ raise RuntimeError("File %s is to be overwritten" % filename)
if override:
self._overridden_files.add(filename)
self._written_files.add(filename)
- return open(filename, 'w', encoding=encoding)
+ return open(filename, "w", encoding=encoding)
- def write_feed(self, elements, context, path=None, url=None,
- feed_type='atom', override_output=False, feed_title=None):
+ def write_feed(
+ self,
+ elements,
+ context,
+ path=None,
+ url=None,
+ feed_type="atom",
+ override_output=False,
+ feed_title=None,
+ ):
"""Generate a feed with the list of articles provided
Return the feed. If no path or output_path is specified, just
@@ -137,16 +148,15 @@ class Writer:
if not is_selected_for_writing(self.settings, path):
return
- self.site_url = context.get(
- 'SITEURL', path_to_url(get_relative_path(path)))
+ self.site_url = context.get("SITEURL", path_to_url(get_relative_path(path)))
- self.feed_domain = context.get('FEED_DOMAIN')
+ self.feed_domain = context.get("FEED_DOMAIN")
self.feed_url = self.urljoiner(self.feed_domain, url or path)
feed = self._create_new_feed(feed_type, feed_title, context)
# FEED_MAX_ITEMS = None means [:None] to get every element
- for element in elements[:self.settings['FEED_MAX_ITEMS']]:
+ for element in elements[: self.settings["FEED_MAX_ITEMS"]]:
self._add_item_to_the_feed(feed, element)
signals.feed_generated.send(context, feed=feed)
@@ -158,17 +168,25 @@ class Writer:
except Exception:
pass
- with self._open_w(complete_path, 'utf-8', override_output) as fp:
- feed.write(fp, 'utf-8')
- logger.info('Writing %s', complete_path)
+ with self._open_w(complete_path, "utf-8", override_output) as fp:
+ feed.write(fp, "utf-8")
+ logger.info("Writing %s", complete_path)
- signals.feed_written.send(
- complete_path, context=context, feed=feed)
+ signals.feed_written.send(complete_path, context=context, feed=feed)
return feed
- def write_file(self, name, template, context, relative_urls=False,
- paginated=None, template_name=None, override_output=False,
- url=None, **kwargs):
+ def write_file(
+ self,
+ name,
+ template,
+ context,
+ relative_urls=False,
+ paginated=None,
+ template_name=None,
+ override_output=False,
+ url=None,
+ **kwargs,
+ ):
"""Render the template and write the file.
:param name: name of the file to output
@@ -185,10 +203,13 @@ class Writer:
:param **kwargs: additional variables to pass to the templates
"""
- if name is False or \
- name == "" or \
- not is_selected_for_writing(self.settings,
- os.path.join(self.output_path, name)):
+ if (
+ name is False
+ or name == ""
+ or not is_selected_for_writing(
+ self.settings, os.path.join(self.output_path, name)
+ )
+ ):
return
elif not name:
# other stuff, just return for now
@@ -197,8 +218,8 @@ class Writer:
def _write_file(template, localcontext, output_path, name, override):
"""Render the template write the file."""
# set localsiteurl for context so that Contents can adjust links
- if localcontext['localsiteurl']:
- context['localsiteurl'] = localcontext['localsiteurl']
+ if localcontext["localsiteurl"]:
+ context["localsiteurl"] = localcontext["localsiteurl"]
output = template.render(localcontext)
path = sanitised_join(output_path, name)
@@ -207,9 +228,9 @@ class Writer:
except Exception:
pass
- with self._open_w(path, 'utf-8', override=override) as f:
+ with self._open_w(path, "utf-8", override=override) as f:
f.write(output)
- logger.info('Writing %s', path)
+ logger.info("Writing %s", path)
# Send a signal to say we're writing a file with some specific
# local context.
@@ -217,54 +238,66 @@ class Writer:
def _get_localcontext(context, name, kwargs, relative_urls):
localcontext = context.copy()
- localcontext['localsiteurl'] = localcontext.get(
- 'localsiteurl', None)
+ localcontext["localsiteurl"] = localcontext.get("localsiteurl", None)
if relative_urls:
relative_url = path_to_url(get_relative_path(name))
- localcontext['SITEURL'] = relative_url
- localcontext['localsiteurl'] = relative_url
- localcontext['output_file'] = name
+ localcontext["SITEURL"] = relative_url
+ localcontext["localsiteurl"] = relative_url
+ localcontext["output_file"] = name
localcontext.update(kwargs)
return localcontext
if paginated is None:
- paginated = {key: val for key, val in kwargs.items()
- if key in {'articles', 'dates'}}
+ paginated = {
+ key: val for key, val in kwargs.items() if key in {"articles", "dates"}
+ }
# pagination
- if paginated and template_name in self.settings['PAGINATED_TEMPLATES']:
+ if paginated and template_name in self.settings["PAGINATED_TEMPLATES"]:
# pagination needed
- per_page = self.settings['PAGINATED_TEMPLATES'][template_name] \
- or self.settings['DEFAULT_PAGINATION']
+ per_page = (
+ self.settings["PAGINATED_TEMPLATES"][template_name]
+ or self.settings["DEFAULT_PAGINATION"]
+ )
# init paginators
- paginators = {key: Paginator(name, url, val, self.settings,
- per_page)
- for key, val in paginated.items()}
+ paginators = {
+ key: Paginator(name, url, val, self.settings, per_page)
+ for key, val in paginated.items()
+ }
            # generate pages and write them
for page_num in range(list(paginators.values())[0].num_pages):
paginated_kwargs = kwargs.copy()
for key in paginators.keys():
paginator = paginators[key]
- previous_page = paginator.page(page_num) \
- if page_num > 0 else None
+ previous_page = paginator.page(page_num) if page_num > 0 else None
page = paginator.page(page_num + 1)
- next_page = paginator.page(page_num + 2) \
- if page_num + 1 < paginator.num_pages else None
+ next_page = (
+ paginator.page(page_num + 2)
+ if page_num + 1 < paginator.num_pages
+ else None
+ )
paginated_kwargs.update(
- {'%s_paginator' % key: paginator,
- '%s_page' % key: page,
- '%s_previous_page' % key: previous_page,
- '%s_next_page' % key: next_page})
+ {
+ "%s_paginator" % key: paginator,
+ "%s_page" % key: page,
+ "%s_previous_page" % key: previous_page,
+ "%s_next_page" % key: next_page,
+ }
+ )
localcontext = _get_localcontext(
- context, page.save_as, paginated_kwargs, relative_urls)
- _write_file(template, localcontext, self.output_path,
- page.save_as, override_output)
+ context, page.save_as, paginated_kwargs, relative_urls
+ )
+ _write_file(
+ template,
+ localcontext,
+ self.output_path,
+ page.save_as,
+ override_output,
+ )
else:
# no pagination
- localcontext = _get_localcontext(
- context, name, kwargs, relative_urls)
- _write_file(template, localcontext, self.output_path, name,
- override_output)
+ localcontext = _get_localcontext(context, name, kwargs, relative_urls)
+ _write_file(template, localcontext, self.output_path, name, override_output)
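
Two idioms in the hunks above are easy to misread after reflowing: `elements[: self.settings["FEED_MAX_ITEMS"]]` relies on a `None` bound slicing the whole sequence, and the `per_page` lookup uses `or` so a `None` entry in `PAGINATED_TEMPLATES` falls back to `DEFAULT_PAGINATION`. A minimal standalone sketch of both, using illustrative names rather than Pelican's actual objects:

    # FEED_MAX_ITEMS = None means [:None], which takes every element,
    # so "no limit" needs no special case.
    elements = ["a", "b", "c"]
    assert elements[:None] == ["a", "b", "c"]
    assert elements[:2] == ["a", "b"]

    # A falsy per-template value falls back to the default, mirroring
    # the `or` chain in Writer.write_file.
    paginated_templates = {"index": None, "tag": 10}
    default_pagination = 4
    for template_name in ("index", "tag"):
        per_page = paginated_templates[template_name] or default_pagination
        print(template_name, per_page)  # index 4, tag 10
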
diff --git a/samples/pelican.conf.py b/samples/pelican.conf.py
index 1fa7c472..d10254e8 100755
--- a/samples/pelican.conf.py
+++ b/samples/pelican.conf.py
@@ -1,55 +1,59 @@
-AUTHOR = 'Alexis Métaireau'
+AUTHOR = "Alexis Métaireau"
SITENAME = "Alexis' log"
-SITESUBTITLE = 'A personal blog.'
-SITEURL = 'http://blog.notmyidea.org'
+SITESUBTITLE = "A personal blog."
+SITEURL = "http://blog.notmyidea.org"
TIMEZONE = "Europe/Paris"
# can be useful in development, but set to False when you're ready to publish
RELATIVE_URLS = True
-GITHUB_URL = 'http://github.com/ametaireau/'
+GITHUB_URL = "http://github.com/ametaireau/"
DISQUS_SITENAME = "blog-notmyidea"
REVERSE_CATEGORY_ORDER = True
LOCALE = "C"
DEFAULT_PAGINATION = 4
DEFAULT_DATE = (2012, 3, 2, 14, 1, 1)
-FEED_ALL_RSS = 'feeds/all.rss.xml'
-CATEGORY_FEED_RSS = 'feeds/{slug}.rss.xml'
+FEED_ALL_RSS = "feeds/all.rss.xml"
+CATEGORY_FEED_RSS = "feeds/{slug}.rss.xml"
-LINKS = (('Biologeek', 'http://biologeek.org'),
- ('Filyb', "http://filyb.info/"),
- ('Libert-fr', "http://www.libert-fr.com"),
- ('N1k0', "http://prendreuncafe.com/blog/"),
- ('Tarek Ziadé', "http://ziade.org/blog"),
- ('Zubin Mithra', "http://zubin71.wordpress.com/"),)
+LINKS = (
+ ("Biologeek", "http://biologeek.org"),
+ ("Filyb", "http://filyb.info/"),
+ ("Libert-fr", "http://www.libert-fr.com"),
+ ("N1k0", "http://prendreuncafe.com/blog/"),
+ ("Tarek Ziadé", "http://ziade.org/blog"),
+ ("Zubin Mithra", "http://zubin71.wordpress.com/"),
+)
-SOCIAL = (('twitter', 'http://twitter.com/ametaireau'),
- ('lastfm', 'http://lastfm.com/user/akounet'),
- ('github', 'http://github.com/ametaireau'),)
+SOCIAL = (
+ ("twitter", "http://twitter.com/ametaireau"),
+ ("lastfm", "http://lastfm.com/user/akounet"),
+ ("github", "http://github.com/ametaireau"),
+)
# global metadata applied to all content
-DEFAULT_METADATA = {'yeah': 'it is'}
+DEFAULT_METADATA = {"yeah": "it is"}
# path-specific metadata
EXTRA_PATH_METADATA = {
- 'extra/robots.txt': {'path': 'robots.txt'},
- }
+ "extra/robots.txt": {"path": "robots.txt"},
+}
# static paths will be copied without parsing their contents
STATIC_PATHS = [
- 'images',
- 'extra/robots.txt',
- ]
+ "images",
+ "extra/robots.txt",
+]
# custom page generated with a jinja2 template
-TEMPLATE_PAGES = {'pages/jinja2_template.html': 'jinja2_template.html'}
+TEMPLATE_PAGES = {"pages/jinja2_template.html": "jinja2_template.html"}
# there is no other HTML content
-READERS = {'html': None}
+READERS = {"html": None}
# code blocks with line numbers
-PYGMENTS_RST_OPTIONS = {'linenos': 'table'}
+PYGMENTS_RST_OPTIONS = {"linenos": "table"}
# foobar will not be used, because it's not in caps. All configuration keys
# have to be in caps
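
The trailing comment is the contract this sample relies on: only ALL-CAPS module-level names are read as settings. A minimal sketch of that filtering, assuming the settings arrive as a plain namespace dict; Pelican's actual loader in pelican/settings.py may differ in detail:

    def caps_only(namespace):
        # Keep only ALL-CAPS names; a lowercase name like `foobar` is dropped.
        return {k: v for k, v in namespace.items() if k.isupper()}

    conf = {"SITENAME": "Alexis' log", "DEFAULT_PAGINATION": 4, "foobar": "ignored"}
    assert caps_only(conf) == {"SITENAME": "Alexis' log", "DEFAULT_PAGINATION": 4}
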
diff --git a/samples/pelican.conf_FR.py b/samples/pelican.conf_FR.py
index dc657404..cbca06df 100644
--- a/samples/pelican.conf_FR.py
+++ b/samples/pelican.conf_FR.py
@@ -1,56 +1,60 @@
-AUTHOR = 'Alexis Métaireau'
+AUTHOR = "Alexis Métaireau"
SITENAME = "Alexis' log"
-SITEURL = 'http://blog.notmyidea.org'
+SITEURL = "http://blog.notmyidea.org"
TIMEZONE = "Europe/Paris"
# can be useful in development, but set to False when you're ready to publish
RELATIVE_URLS = True
-GITHUB_URL = 'http://github.com/ametaireau/'
+GITHUB_URL = "http://github.com/ametaireau/"
DISQUS_SITENAME = "blog-notmyidea"
PDF_GENERATOR = False
REVERSE_CATEGORY_ORDER = True
LOCALE = "fr_FR.UTF-8"
DEFAULT_PAGINATION = 4
DEFAULT_DATE = (2012, 3, 2, 14, 1, 1)
-DEFAULT_DATE_FORMAT = '%d %B %Y'
+DEFAULT_DATE_FORMAT = "%d %B %Y"
-ARTICLE_URL = 'posts/{date:%Y}/{date:%B}/{date:%d}/{slug}/'
-ARTICLE_SAVE_AS = ARTICLE_URL + 'index.html'
+ARTICLE_URL = "posts/{date:%Y}/{date:%B}/{date:%d}/{slug}/"
+ARTICLE_SAVE_AS = ARTICLE_URL + "index.html"
-FEED_ALL_RSS = 'feeds/all.rss.xml'
-CATEGORY_FEED_RSS = 'feeds/{slug}.rss.xml'
+FEED_ALL_RSS = "feeds/all.rss.xml"
+CATEGORY_FEED_RSS = "feeds/{slug}.rss.xml"
-LINKS = (('Biologeek', 'http://biologeek.org'),
- ('Filyb', "http://filyb.info/"),
- ('Libert-fr', "http://www.libert-fr.com"),
- ('N1k0', "http://prendreuncafe.com/blog/"),
- ('Tarek Ziadé', "http://ziade.org/blog"),
- ('Zubin Mithra', "http://zubin71.wordpress.com/"),)
+LINKS = (
+ ("Biologeek", "http://biologeek.org"),
+ ("Filyb", "http://filyb.info/"),
+ ("Libert-fr", "http://www.libert-fr.com"),
+ ("N1k0", "http://prendreuncafe.com/blog/"),
+ ("Tarek Ziadé", "http://ziade.org/blog"),
+ ("Zubin Mithra", "http://zubin71.wordpress.com/"),
+)
-SOCIAL = (('twitter', 'http://twitter.com/ametaireau'),
- ('lastfm', 'http://lastfm.com/user/akounet'),
- ('github', 'http://github.com/ametaireau'),)
+SOCIAL = (
+ ("twitter", "http://twitter.com/ametaireau"),
+ ("lastfm", "http://lastfm.com/user/akounet"),
+ ("github", "http://github.com/ametaireau"),
+)
# global metadata applied to all content
-DEFAULT_METADATA = {'yeah': 'it is'}
+DEFAULT_METADATA = {"yeah": "it is"}
# path-specific metadata
EXTRA_PATH_METADATA = {
- 'extra/robots.txt': {'path': 'robots.txt'},
- }
+ "extra/robots.txt": {"path": "robots.txt"},
+}
# static paths will be copied without parsing their contents
STATIC_PATHS = [
- 'pictures',
- 'extra/robots.txt',
- ]
+ "pictures",
+ "extra/robots.txt",
+]
# custom page generated with a jinja2 template
-TEMPLATE_PAGES = {'pages/jinja2_template.html': 'jinja2_template.html'}
+TEMPLATE_PAGES = {"pages/jinja2_template.html": "jinja2_template.html"}
# code blocks with line numbers
-PYGMENTS_RST_OPTIONS = {'linenos': 'table'}
+PYGMENTS_RST_OPTIONS = {"linenos": "table"}
# foobar will not be used, because it's not in caps. All configuration keys
# have to be in caps
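
In this French sample, LOCALE and DEFAULT_DATE_FORMAT interact with URL patterns such as ARTICLE_URL: a placeholder like `{date:%B}` delegates to strftime, so month names come out in French once the locale is active. A hedged sketch of that expansion (the slug "mon-article" is hypothetical, and fr_FR.UTF-8 must be installed for setlocale to succeed):

    import locale
    from datetime import datetime

    locale.setlocale(locale.LC_TIME, "fr_FR.UTF-8")  # assumes the locale exists

    # str.format passes the spec after ":" to datetime.__format__ (strftime).
    article_url = "posts/{date:%Y}/{date:%B}/{date:%d}/{slug}/"
    print(article_url.format(date=datetime(2012, 3, 2, 14, 1, 1), slug="mon-article"))
    # -> posts/2012/mars/02/mon-article/
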