Apply code style to project via: ruff format .

Author: Chris Rose, 2023-10-29 22:18:29 +01:00 (committed by Justin Mayer)
commit cabdb26cee
41 changed files with 6505 additions and 5163 deletions
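
For context, a minimal sketch of how a formatting pass like this is typically run and then kept enforced. The Ruff version and any configuration used for this commit are not recorded here, so the commands below are illustrative, not a record of what was run:

    pip install ruff        # the formatter used for this commit
    ruff format .           # rewrite all Python files in place, Black-compatible style
    ruff format --check .   # CI-friendly: fail, without modifying files, if anything would change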


@@ -9,19 +9,25 @@ import sys
 import time
 import traceback
 from collections.abc import Iterable

 # Combines all paths to `pelican` package accessible from `sys.path`
 # Makes it possible to install `pelican` and namespace plugins into different
 # locations in the file system (e.g. pip with `-e` or `--user`)
 from pkgutil import extend_path
 __path__ = extend_path(__path__, __name__)

 # pelican.log has to be the first pelican module to be loaded
 # because logging.setLoggerClass has to be called before logging.getLogger
 from pelican.log import console
 from pelican.log import init as init_logging
-from pelican.generators import (ArticlesGenerator,  # noqa: I100
-                                PagesGenerator, SourceFileGenerator,
-                                StaticGenerator, TemplatePagesGenerator)
+from pelican.generators import (
+    ArticlesGenerator,  # noqa: I100
+    PagesGenerator,
+    SourceFileGenerator,
+    StaticGenerator,
+    TemplatePagesGenerator,
+)
 from pelican.plugins import signals
 from pelican.plugins._utils import get_plugin_name, load_plugins
 from pelican.readers import Readers
@@ -35,12 +41,11 @@ try:
 except Exception:
     __version__ = "unknown"

-DEFAULT_CONFIG_NAME = 'pelicanconf.py'
+DEFAULT_CONFIG_NAME = "pelicanconf.py"
 logger = logging.getLogger(__name__)


 class Pelican:
     def __init__(self, settings):
         """Pelican initialization
@@ -50,35 +55,34 @@ class Pelican:
         # define the default settings
         self.settings = settings
-        self.path = settings['PATH']
-        self.theme = settings['THEME']
-        self.output_path = settings['OUTPUT_PATH']
-        self.ignore_files = settings['IGNORE_FILES']
-        self.delete_outputdir = settings['DELETE_OUTPUT_DIRECTORY']
-        self.output_retention = settings['OUTPUT_RETENTION']
+        self.path = settings["PATH"]
+        self.theme = settings["THEME"]
+        self.output_path = settings["OUTPUT_PATH"]
+        self.ignore_files = settings["IGNORE_FILES"]
+        self.delete_outputdir = settings["DELETE_OUTPUT_DIRECTORY"]
+        self.output_retention = settings["OUTPUT_RETENTION"]

         self.init_path()
         self.init_plugins()
         signals.initialized.send(self)

     def init_path(self):
-        if not any(p in sys.path for p in ['', os.curdir]):
+        if not any(p in sys.path for p in ["", os.curdir]):
             logger.debug("Adding current directory to system path")
-            sys.path.insert(0, '')
+            sys.path.insert(0, "")

     def init_plugins(self):
         self.plugins = []
         for plugin in load_plugins(self.settings):
             name = get_plugin_name(plugin)
-            logger.debug('Registering plugin `%s`', name)
+            logger.debug("Registering plugin `%s`", name)
             try:
                 plugin.register()
                 self.plugins.append(plugin)
             except Exception as e:
-                logger.error('Cannot register plugin `%s`\n%s',
-                             name, e)
+                logger.error("Cannot register plugin `%s`\n%s", name, e)

-        self.settings['PLUGINS'] = [get_plugin_name(p) for p in self.plugins]
+        self.settings["PLUGINS"] = [get_plugin_name(p) for p in self.plugins]

     def run(self):
         """Run the generators and return"""
@@ -87,10 +91,10 @@ class Pelican:
         context = self.settings.copy()
         # Share these among all the generators and content objects
         # They map source paths to Content objects or None
-        context['generated_content'] = {}
-        context['static_links'] = set()
-        context['static_content'] = {}
-        context['localsiteurl'] = self.settings['SITEURL']
+        context["generated_content"] = {}
+        context["static_links"] = set()
+        context["static_content"] = {}
+        context["localsiteurl"] = self.settings["SITEURL"]

         generators = [
             cls(
@@ -99,23 +103,25 @@ class Pelican:
                 path=self.path,
                 theme=self.theme,
                 output_path=self.output_path,
-            ) for cls in self._get_generator_classes()
+            )
+            for cls in self._get_generator_classes()
         ]

         # Delete the output directory if (1) the appropriate setting is True
         # and (2) that directory is not the parent of the source directory
-        if (self.delete_outputdir
-                and os.path.commonpath([os.path.realpath(self.output_path)]) !=
-                os.path.commonpath([os.path.realpath(self.output_path),
-                                    os.path.realpath(self.path)])):
+        if self.delete_outputdir and os.path.commonpath(
+            [os.path.realpath(self.output_path)]
+        ) != os.path.commonpath(
+            [os.path.realpath(self.output_path), os.path.realpath(self.path)]
+        ):
             clean_output_dir(self.output_path, self.output_retention)

         for p in generators:
-            if hasattr(p, 'generate_context'):
+            if hasattr(p, "generate_context"):
                 p.generate_context()

         for p in generators:
-            if hasattr(p, 'refresh_metadata_intersite_links'):
+            if hasattr(p, "refresh_metadata_intersite_links"):
                 p.refresh_metadata_intersite_links()

         signals.all_generators_finalized.send(generators)
@@ -123,61 +129,75 @@ class Pelican:
         writer = self._get_writer()

         for p in generators:
-            if hasattr(p, 'generate_output'):
+            if hasattr(p, "generate_output"):
                 p.generate_output(writer)

         signals.finalized.send(self)

-        articles_generator = next(g for g in generators
-                                  if isinstance(g, ArticlesGenerator))
-        pages_generator = next(g for g in generators
-                               if isinstance(g, PagesGenerator))
+        articles_generator = next(
+            g for g in generators if isinstance(g, ArticlesGenerator)
+        )
+        pages_generator = next(g for g in generators if isinstance(g, PagesGenerator))

         pluralized_articles = maybe_pluralize(
-            (len(articles_generator.articles) +
-             len(articles_generator.translations)),
-            'article',
-            'articles')
+            (len(articles_generator.articles) + len(articles_generator.translations)),
+            "article",
+            "articles",
+        )
         pluralized_drafts = maybe_pluralize(
-            (len(articles_generator.drafts) +
-             len(articles_generator.drafts_translations)),
-            'draft',
-            'drafts')
+            (
+                len(articles_generator.drafts)
+                + len(articles_generator.drafts_translations)
+            ),
+            "draft",
+            "drafts",
+        )
         pluralized_hidden_articles = maybe_pluralize(
-            (len(articles_generator.hidden_articles) +
-             len(articles_generator.hidden_translations)),
-            'hidden article',
-            'hidden articles')
+            (
+                len(articles_generator.hidden_articles)
+                + len(articles_generator.hidden_translations)
+            ),
+            "hidden article",
+            "hidden articles",
+        )
         pluralized_pages = maybe_pluralize(
-            (len(pages_generator.pages) +
-             len(pages_generator.translations)),
-            'page',
-            'pages')
+            (len(pages_generator.pages) + len(pages_generator.translations)),
+            "page",
+            "pages",
+        )
         pluralized_hidden_pages = maybe_pluralize(
-            (len(pages_generator.hidden_pages) +
-             len(pages_generator.hidden_translations)),
-            'hidden page',
-            'hidden pages')
+            (
+                len(pages_generator.hidden_pages)
+                + len(pages_generator.hidden_translations)
+            ),
+            "hidden page",
+            "hidden pages",
+        )
         pluralized_draft_pages = maybe_pluralize(
-            (len(pages_generator.draft_pages) +
-             len(pages_generator.draft_translations)),
-            'draft page',
-            'draft pages')
+            (
+                len(pages_generator.draft_pages)
+                + len(pages_generator.draft_translations)
+            ),
+            "draft page",
+            "draft pages",
+        )

-        console.print('Done: Processed {}, {}, {}, {}, {} and {} in {:.2f} seconds.'
-                      .format(
-                          pluralized_articles,
-                          pluralized_drafts,
-                          pluralized_hidden_articles,
-                          pluralized_pages,
-                          pluralized_hidden_pages,
-                          pluralized_draft_pages,
-                          time.time() - start_time))
+        console.print(
+            "Done: Processed {}, {}, {}, {}, {} and {} in {:.2f} seconds.".format(
+                pluralized_articles,
+                pluralized_drafts,
+                pluralized_hidden_articles,
+                pluralized_pages,
+                pluralized_hidden_pages,
+                pluralized_draft_pages,
+                time.time() - start_time,
+            )
+        )

     def _get_generator_classes(self):
         discovered_generators = [
             (ArticlesGenerator, "internal"),
-            (PagesGenerator, "internal")
+            (PagesGenerator, "internal"),
         ]
         if self.settings["TEMPLATE_PAGES"]:
@@ -236,7 +256,7 @@ class PrintSettings(argparse.Action):
         except Exception as e:
             logger.critical("%s: %s", e.__class__.__name__, e)
             console.print_exception()
-            sys.exit(getattr(e, 'exitcode', 1))
+            sys.exit(getattr(e, "exitcode", 1))

         if values:
             # One or more arguments provided, so only print those settings
@@ -244,14 +264,16 @@ class PrintSettings(argparse.Action):
                 if setting in settings:
                     # Only add newline between setting name and value if dict
                     if isinstance(settings[setting], (dict, tuple, list)):
-                        setting_format = '\n{}:\n{}'
+                        setting_format = "\n{}:\n{}"
                     else:
-                        setting_format = '\n{}: {}'
-                    console.print(setting_format.format(
-                        setting,
-                        pprint.pformat(settings[setting])))
+                        setting_format = "\n{}: {}"
+                    console.print(
+                        setting_format.format(
+                            setting, pprint.pformat(settings[setting])
+                        )
+                    )
                 else:
-                    console.print('\n{} is not a recognized setting.'.format(setting))
+                    console.print("\n{} is not a recognized setting.".format(setting))
                     break
         else:
             # No argument was given to --print-settings, so print all settings
@@ -268,170 +290,258 @@ class ParseOverrides(argparse.Action):
                 k, v = item.split("=", 1)
             except ValueError:
                 raise ValueError(
-                    'Extra settings must be specified as KEY=VALUE pairs '
-                    f'but you specified {item}'
+                    "Extra settings must be specified as KEY=VALUE pairs "
+                    f"but you specified {item}"
                 )
             try:
                 overrides[k] = json.loads(v)
             except json.decoder.JSONDecodeError:
                 raise ValueError(
-                    f'Invalid JSON value: {v}. '
-                    'Values specified via -e / --extra-settings flags '
-                    'must be in JSON notation. '
-                    'Use -e KEY=\'"string"\' to specify a string value; '
-                    '-e KEY=null to specify None; '
-                    '-e KEY=false (or true) to specify False (or True).'
+                    f"Invalid JSON value: {v}. "
+                    "Values specified via -e / --extra-settings flags "
+                    "must be in JSON notation. "
+                    "Use -e KEY='\"string\"' to specify a string value; "
+                    "-e KEY=null to specify None; "
+                    "-e KEY=false (or true) to specify False (or True)."
                 )
         setattr(namespace, self.dest, overrides)


 def parse_arguments(argv=None):
     parser = argparse.ArgumentParser(
-        description='A tool to generate a static blog, '
-                    ' with restructured text input files.',
-        formatter_class=argparse.ArgumentDefaultsHelpFormatter
+        description="A tool to generate a static blog, "
+        " with restructured text input files.",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
     )

-    parser.add_argument(dest='path', nargs='?',
-                        help='Path where to find the content files.',
-                        default=None)
+    parser.add_argument(
+        dest="path",
+        nargs="?",
+        help="Path where to find the content files.",
+        default=None,
+    )

-    parser.add_argument('-t', '--theme-path', dest='theme',
-                        help='Path where to find the theme templates. If not '
-                        'specified, it will use the default one included with '
-                        'pelican.')
+    parser.add_argument(
+        "-t",
+        "--theme-path",
+        dest="theme",
+        help="Path where to find the theme templates. If not "
+        "specified, it will use the default one included with "
+        "pelican.",
+    )

-    parser.add_argument('-o', '--output', dest='output',
-                        help='Where to output the generated files. If not '
-                        'specified, a directory will be created, named '
-                        '"output" in the current path.')
+    parser.add_argument(
+        "-o",
+        "--output",
+        dest="output",
+        help="Where to output the generated files. If not "
+        "specified, a directory will be created, named "
+        '"output" in the current path.',
+    )

-    parser.add_argument('-s', '--settings', dest='settings',
-                        help='The settings of the application, this is '
-                        'automatically set to {} if a file exists with this '
-                        'name.'.format(DEFAULT_CONFIG_NAME))
+    parser.add_argument(
+        "-s",
+        "--settings",
+        dest="settings",
+        help="The settings of the application, this is "
+        "automatically set to {} if a file exists with this "
+        "name.".format(DEFAULT_CONFIG_NAME),
+    )

-    parser.add_argument('-d', '--delete-output-directory',
-                        dest='delete_outputdir', action='store_true',
-                        default=None, help='Delete the output directory.')
+    parser.add_argument(
+        "-d",
+        "--delete-output-directory",
+        dest="delete_outputdir",
+        action="store_true",
+        default=None,
+        help="Delete the output directory.",
+    )

-    parser.add_argument('-v', '--verbose', action='store_const',
-                        const=logging.INFO, dest='verbosity',
-                        help='Show all messages.')
+    parser.add_argument(
+        "-v",
+        "--verbose",
+        action="store_const",
+        const=logging.INFO,
+        dest="verbosity",
+        help="Show all messages.",
+    )

-    parser.add_argument('-q', '--quiet', action='store_const',
-                        const=logging.CRITICAL, dest='verbosity',
-                        help='Show only critical errors.')
+    parser.add_argument(
+        "-q",
+        "--quiet",
+        action="store_const",
+        const=logging.CRITICAL,
+        dest="verbosity",
+        help="Show only critical errors.",
+    )

-    parser.add_argument('-D', '--debug', action='store_const',
-                        const=logging.DEBUG, dest='verbosity',
-                        help='Show all messages, including debug messages.')
+    parser.add_argument(
+        "-D",
+        "--debug",
+        action="store_const",
+        const=logging.DEBUG,
+        dest="verbosity",
+        help="Show all messages, including debug messages.",
+    )

-    parser.add_argument('--version', action='version', version=__version__,
-                        help='Print the pelican version and exit.')
+    parser.add_argument(
+        "--version",
+        action="version",
+        version=__version__,
+        help="Print the pelican version and exit.",
+    )

-    parser.add_argument('-r', '--autoreload', dest='autoreload',
-                        action='store_true',
-                        help='Relaunch pelican each time a modification occurs'
-                        ' on the content files.')
+    parser.add_argument(
+        "-r",
+        "--autoreload",
+        dest="autoreload",
+        action="store_true",
+        help="Relaunch pelican each time a modification occurs"
+        " on the content files.",
+    )

-    parser.add_argument('--print-settings', dest='print_settings', nargs='*',
-                        action=PrintSettings, metavar='SETTING_NAME',
-                        help='Print current configuration settings and exit. '
-                        'Append one or more setting name arguments to see the '
-                        'values for specific settings only.')
+    parser.add_argument(
+        "--print-settings",
+        dest="print_settings",
+        nargs="*",
+        action=PrintSettings,
+        metavar="SETTING_NAME",
+        help="Print current configuration settings and exit. "
+        "Append one or more setting name arguments to see the "
+        "values for specific settings only.",
+    )

-    parser.add_argument('--relative-urls', dest='relative_paths',
-                        action='store_true',
-                        help='Use relative urls in output, '
-                        'useful for site development')
+    parser.add_argument(
+        "--relative-urls",
+        dest="relative_paths",
+        action="store_true",
+        help="Use relative urls in output, " "useful for site development",
+    )

-    parser.add_argument('--cache-path', dest='cache_path',
-                        help=('Directory in which to store cache files. '
-                              'If not specified, defaults to "cache".'))
+    parser.add_argument(
+        "--cache-path",
+        dest="cache_path",
+        help=(
+            "Directory in which to store cache files. "
+            'If not specified, defaults to "cache".'
+        ),
+    )

-    parser.add_argument('--ignore-cache', action='store_true',
-                        dest='ignore_cache', help='Ignore content cache '
-                        'from previous runs by not loading cache files.')
+    parser.add_argument(
+        "--ignore-cache",
+        action="store_true",
+        dest="ignore_cache",
+        help="Ignore content cache " "from previous runs by not loading cache files.",
+    )

-    parser.add_argument('-w', '--write-selected', type=str,
-                        dest='selected_paths', default=None,
-                        help='Comma separated list of selected paths to write')
+    parser.add_argument(
+        "-w",
+        "--write-selected",
+        type=str,
+        dest="selected_paths",
+        default=None,
+        help="Comma separated list of selected paths to write",
+    )

-    parser.add_argument('--fatal', metavar='errors|warnings',
-                        choices=('errors', 'warnings'), default='',
-                        help=('Exit the program with non-zero status if any '
-                              'errors/warnings encountered.'))
+    parser.add_argument(
+        "--fatal",
+        metavar="errors|warnings",
+        choices=("errors", "warnings"),
+        default="",
+        help=(
+            "Exit the program with non-zero status if any "
+            "errors/warnings encountered."
+        ),
+    )

-    parser.add_argument('--logs-dedup-min-level', default='WARNING',
-                        choices=('DEBUG', 'INFO', 'WARNING', 'ERROR'),
-                        help=('Only enable log de-duplication for levels equal'
-                              ' to or above the specified value'))
+    parser.add_argument(
+        "--logs-dedup-min-level",
+        default="WARNING",
+        choices=("DEBUG", "INFO", "WARNING", "ERROR"),
+        help=(
+            "Only enable log de-duplication for levels equal"
+            " to or above the specified value"
+        ),
+    )

-    parser.add_argument('-l', '--listen', dest='listen', action='store_true',
-                        help='Serve content files via HTTP and port 8000.')
+    parser.add_argument(
+        "-l",
+        "--listen",
+        dest="listen",
+        action="store_true",
+        help="Serve content files via HTTP and port 8000.",
+    )

-    parser.add_argument('-p', '--port', dest='port', type=int,
-                        help='Port to serve HTTP files at. (default: 8000)')
+    parser.add_argument(
+        "-p",
+        "--port",
+        dest="port",
+        type=int,
+        help="Port to serve HTTP files at. (default: 8000)",
+    )

-    parser.add_argument('-b', '--bind', dest='bind',
-                        help='IP to bind to when serving files via HTTP '
-                        '(default: 127.0.0.1)')
+    parser.add_argument(
+        "-b",
+        "--bind",
+        dest="bind",
+        help="IP to bind to when serving files via HTTP " "(default: 127.0.0.1)",
+    )

-    parser.add_argument('-e', '--extra-settings', dest='overrides',
-                        help='Specify one or more SETTING=VALUE pairs to '
-                        'override settings. VALUE must be in JSON notation: '
-                        'specify string values as SETTING=\'"some string"\'; '
-                        'booleans as SETTING=true or SETTING=false; '
-                        'None as SETTING=null.',
-                        nargs='*',
-                        action=ParseOverrides,
-                        default={})
+    parser.add_argument(
+        "-e",
+        "--extra-settings",
+        dest="overrides",
+        help="Specify one or more SETTING=VALUE pairs to "
+        "override settings. VALUE must be in JSON notation: "
+        "specify string values as SETTING='\"some string\"'; "
+        "booleans as SETTING=true or SETTING=false; "
+        "None as SETTING=null.",
+        nargs="*",
+        action=ParseOverrides,
+        default={},
+    )

     args = parser.parse_args(argv)

     if args.port is not None and not args.listen:
-        logger.warning('--port without --listen has no effect')
+        logger.warning("--port without --listen has no effect")
     if args.bind is not None and not args.listen:
-        logger.warning('--bind without --listen has no effect')
+        logger.warning("--bind without --listen has no effect")

     return args


 def get_config(args):
-    """Builds a config dictionary based on supplied `args`.
-    """
+    """Builds a config dictionary based on supplied `args`."""
     config = {}
     if args.path:
-        config['PATH'] = os.path.abspath(os.path.expanduser(args.path))
+        config["PATH"] = os.path.abspath(os.path.expanduser(args.path))
     if args.output:
-        config['OUTPUT_PATH'] = \
-            os.path.abspath(os.path.expanduser(args.output))
+        config["OUTPUT_PATH"] = os.path.abspath(os.path.expanduser(args.output))
     if args.theme:
         abstheme = os.path.abspath(os.path.expanduser(args.theme))
-        config['THEME'] = abstheme if os.path.exists(abstheme) else args.theme
+        config["THEME"] = abstheme if os.path.exists(abstheme) else args.theme
     if args.delete_outputdir is not None:
-        config['DELETE_OUTPUT_DIRECTORY'] = args.delete_outputdir
+        config["DELETE_OUTPUT_DIRECTORY"] = args.delete_outputdir
     if args.ignore_cache:
-        config['LOAD_CONTENT_CACHE'] = False
+        config["LOAD_CONTENT_CACHE"] = False
     if args.cache_path:
-        config['CACHE_PATH'] = args.cache_path
+        config["CACHE_PATH"] = args.cache_path
     if args.selected_paths:
-        config['WRITE_SELECTED'] = args.selected_paths.split(',')
+        config["WRITE_SELECTED"] = args.selected_paths.split(",")
     if args.relative_paths:
-        config['RELATIVE_URLS'] = args.relative_paths
+        config["RELATIVE_URLS"] = args.relative_paths
     if args.port is not None:
-        config['PORT'] = args.port
+        config["PORT"] = args.port
     if args.bind is not None:
-        config['BIND'] = args.bind
-    config['DEBUG'] = args.verbosity == logging.DEBUG
+        config["BIND"] = args.bind
+    config["DEBUG"] = args.verbosity == logging.DEBUG
     config.update(args.overrides)

     return config


 def get_instance(args):
     config_file = args.settings
     if config_file is None and os.path.isfile(DEFAULT_CONFIG_NAME):
         config_file = DEFAULT_CONFIG_NAME
@@ -439,9 +549,9 @@ def get_instance(args):
     settings = read_settings(config_file, override=get_config(args))

-    cls = settings['PELICAN_CLASS']
+    cls = settings["PELICAN_CLASS"]
     if isinstance(cls, str):
-        module, cls_name = cls.rsplit('.', 1)
+        module, cls_name = cls.rsplit(".", 1)
         module = __import__(module)
         cls = getattr(module, cls_name)
@@ -449,8 +559,10 @@ def get_instance(args):

 def autoreload(args, excqueue=None):
-    console.print(' --- AutoReload Mode: Monitoring `content`, `theme` and'
-                  ' `settings` for changes. ---')
+    console.print(
+        " --- AutoReload Mode: Monitoring `content`, `theme` and"
+        " `settings` for changes. ---"
+    )
     pelican, settings = get_instance(args)
     settings_file = os.path.abspath(args.settings)
     while True:
@@ -463,8 +575,9 @@ def autoreload(args, excqueue=None):
             if settings_file in changed_files:
                 pelican, settings = get_instance(args)

-            console.print('\n-> Modified: {}. re-generating...'.format(
-                ', '.join(changed_files)))
+            console.print(
+                "\n-> Modified: {}. re-generating...".format(", ".join(changed_files))
+            )

         except KeyboardInterrupt:
             if excqueue is not None:
@@ -473,15 +586,14 @@ def autoreload(args, excqueue=None):
                 raise

         except Exception as e:
-            if (args.verbosity == logging.DEBUG):
+            if args.verbosity == logging.DEBUG:
                 if excqueue is not None:
-                    excqueue.put(
-                        traceback.format_exception_only(type(e), e)[-1])
+                    excqueue.put(traceback.format_exception_only(type(e), e)[-1])
                 else:
                     raise
             logger.warning(
-                'Caught exception:\n"%s".', e,
-                exc_info=settings.get('DEBUG', False))
+                'Caught exception:\n"%s".', e, exc_info=settings.get("DEBUG", False)
+            )


 def listen(server, port, output, excqueue=None):
@@ -491,8 +603,7 @@ def listen(server, port, output, excqueue=None):
     RootedHTTPServer.allow_reuse_address = True
     try:
-        httpd = RootedHTTPServer(
-            output, (server, port), ComplexHTTPRequestHandler)
+        httpd = RootedHTTPServer(output, (server, port), ComplexHTTPRequestHandler)
     except OSError as e:
         logging.error("Could not listen on port %s, server %s.", port, server)
         if excqueue is not None:
@@ -500,8 +611,9 @@ def listen(server, port, output, excqueue=None):
         return

     try:
-        console.print("Serving site at: http://{}:{} - Tap CTRL-C to stop".format(
-            server, port))
+        console.print(
+            "Serving site at: http://{}:{} - Tap CTRL-C to stop".format(server, port)
+        )
         httpd.serve_forever()
     except Exception as e:
         if excqueue is not None:
@@ -518,24 +630,31 @@ def listen(server, port, output, excqueue=None):
 def main(argv=None):
     args = parse_arguments(argv)
     logs_dedup_min_level = getattr(logging, args.logs_dedup_min_level)
-    init_logging(level=args.verbosity, fatal=args.fatal,
-                 name=__name__, logs_dedup_min_level=logs_dedup_min_level)
+    init_logging(
+        level=args.verbosity,
+        fatal=args.fatal,
+        name=__name__,
+        logs_dedup_min_level=logs_dedup_min_level,
+    )

-    logger.debug('Pelican version: %s', __version__)
-    logger.debug('Python version: %s', sys.version.split()[0])
+    logger.debug("Pelican version: %s", __version__)
+    logger.debug("Python version: %s", sys.version.split()[0])

     try:
         pelican, settings = get_instance(args)

         if args.autoreload and args.listen:
             excqueue = multiprocessing.Queue()
-            p1 = multiprocessing.Process(
-                target=autoreload,
-                args=(args, excqueue))
+            p1 = multiprocessing.Process(target=autoreload, args=(args, excqueue))
             p2 = multiprocessing.Process(
                 target=listen,
-                args=(settings.get('BIND'), settings.get('PORT'),
-                      settings.get("OUTPUT_PATH"), excqueue))
+                args=(
+                    settings.get("BIND"),
+                    settings.get("PORT"),
+                    settings.get("OUTPUT_PATH"),
+                    excqueue,
+                ),
+            )
             try:
                 p1.start()
                 p2.start()
@@ -548,16 +667,17 @@ def main(argv=None):
         elif args.autoreload:
             autoreload(args)
         elif args.listen:
-            listen(settings.get('BIND'), settings.get('PORT'),
-                   settings.get("OUTPUT_PATH"))
+            listen(
+                settings.get("BIND"), settings.get("PORT"), settings.get("OUTPUT_PATH")
+            )
         else:
             with console.status("Generating..."):
                 pelican.run()
     except KeyboardInterrupt:
-        logger.warning('Keyboard interrupt received. Exiting.')
+        logger.warning("Keyboard interrupt received. Exiting.")
     except Exception as e:
         logger.critical("%s: %s", e.__class__.__name__, e)

         if args.verbosity == logging.DEBUG:
             console.print_exception()
-        sys.exit(getattr(e, 'exitcode', 1))
+        sys.exit(getattr(e, "exitcode", 1))


@@ -5,5 +5,5 @@ python -m pelican module entry point to run via python -m
 from . import main

-if __name__ == '__main__':
+if __name__ == "__main__":
     main()


@@ -19,29 +19,35 @@ class FileDataCacher:
         Sets caching policy according to *caching_policy*.
         """
         self.settings = settings
-        self._cache_path = os.path.join(self.settings['CACHE_PATH'],
-                                        cache_name)
+        self._cache_path = os.path.join(self.settings["CACHE_PATH"], cache_name)
         self._cache_data_policy = caching_policy
-        if self.settings['GZIP_CACHE']:
+        if self.settings["GZIP_CACHE"]:
             import gzip
+
             self._cache_open = gzip.open
         else:
             self._cache_open = open
         if load_policy:
             try:
-                with self._cache_open(self._cache_path, 'rb') as fhandle:
+                with self._cache_open(self._cache_path, "rb") as fhandle:
                     self._cache = pickle.load(fhandle)
             except (OSError, UnicodeDecodeError) as err:
-                logger.debug('Cannot load cache %s (this is normal on first '
-                             'run). Proceeding with empty cache.\n%s',
-                             self._cache_path, err)
+                logger.debug(
+                    "Cannot load cache %s (this is normal on first "
+                    "run). Proceeding with empty cache.\n%s",
+                    self._cache_path,
+                    err,
+                )
                 self._cache = {}
             except pickle.PickleError as err:
-                logger.warning('Cannot unpickle cache %s, cache may be using '
-                               'an incompatible protocol (see pelican '
-                               'caching docs). '
-                               'Proceeding with empty cache.\n%s',
-                               self._cache_path, err)
+                logger.warning(
+                    "Cannot unpickle cache %s, cache may be using "
+                    "an incompatible protocol (see pelican "
+                    "caching docs). "
+                    "Proceeding with empty cache.\n%s",
+                    self._cache_path,
+                    err,
+                )
                 self._cache = {}
         else:
             self._cache = {}
@@ -62,12 +68,13 @@ class FileDataCacher:
         """Save the updated cache"""
         if self._cache_data_policy:
             try:
-                mkdir_p(self.settings['CACHE_PATH'])
-                with self._cache_open(self._cache_path, 'wb') as fhandle:
+                mkdir_p(self.settings["CACHE_PATH"])
+                with self._cache_open(self._cache_path, "wb") as fhandle:
                     pickle.dump(self._cache, fhandle)
             except (OSError, pickle.PicklingError, TypeError) as err:
-                logger.warning('Could not save cache %s\n ... %s',
-                               self._cache_path, err)
+                logger.warning(
+                    "Could not save cache %s\n ... %s", self._cache_path, err
+                )


 class FileStampDataCacher(FileDataCacher):
@@ -80,8 +87,8 @@ class FileStampDataCacher(FileDataCacher):
         super().__init__(settings, cache_name, caching_policy, load_policy)

-        method = self.settings['CHECK_MODIFIED_METHOD']
-        if method == 'mtime':
+        method = self.settings["CHECK_MODIFIED_METHOD"]
+        if method == "mtime":
             self._filestamp_func = os.path.getmtime
         else:
             try:
@@ -89,12 +96,12 @@ class FileStampDataCacher(FileDataCacher):

                 def filestamp_func(filename):
                     """return hash of file contents"""
-                    with open(filename, 'rb') as fhandle:
+                    with open(filename, "rb") as fhandle:
                         return hash_func(fhandle.read()).digest()

                 self._filestamp_func = filestamp_func
             except AttributeError as err:
-                logger.warning('Could not get hashing function\n\t%s', err)
+                logger.warning("Could not get hashing function\n\t%s", err)
                 self._filestamp_func = None

     def cache_data(self, filename, data):
@@ -115,9 +122,8 @@ class FileStampDataCacher(FileDataCacher):
         try:
             return self._filestamp_func(filename)
         except (OSError, TypeError) as err:
-            logger.warning('Cannot get modification stamp for %s\n\t%s',
-                           filename, err)
-            return ''
+            logger.warning("Cannot get modification stamp for %s\n\t%s", filename, err)
+            return ""

     def get_cached_data(self, filename, default=None):
         """Get the cached data for the given filename


@@ -16,12 +16,19 @@ except ModuleNotFoundError:

 from pelican.plugins import signals
 from pelican.settings import DEFAULT_CONFIG
-from pelican.utils import (deprecated_attribute, memoized, path_to_url,
-                           posixize_path, sanitised_join, set_date_tzinfo,
-                           slugify, truncate_html_words)
+from pelican.utils import (
+    deprecated_attribute,
+    memoized,
+    path_to_url,
+    posixize_path,
+    sanitised_join,
+    set_date_tzinfo,
+    slugify,
+    truncate_html_words,
+)

 # Import these so that they're available when you import from pelican.contents.
-from pelican.urlwrappers import (Author, Category, Tag, URLWrapper)  # NOQA
+from pelican.urlwrappers import Author, Category, Tag, URLWrapper  # NOQA

 logger = logging.getLogger(__name__)
@@ -36,12 +43,14 @@ class Content:
     :param context: The shared context between generators.
     """

-    @deprecated_attribute(old='filename', new='source_path', since=(3, 2, 0))
+    @deprecated_attribute(old="filename", new="source_path", since=(3, 2, 0))
     def filename():
         return None

-    def __init__(self, content, metadata=None, settings=None,
-                 source_path=None, context=None):
+    def __init__(
+        self, content, metadata=None, settings=None, source_path=None, context=None
+    ):
         if metadata is None:
             metadata = {}
         if settings is None:
@@ -59,8 +68,8 @@ class Content:
         # set metadata as attributes
         for key, value in local_metadata.items():
-            if key in ('save_as', 'url'):
-                key = 'override_' + key
+            if key in ("save_as", "url"):
+                key = "override_" + key
             setattr(self, key.lower(), value)

         # also keep track of the metadata attributes available
@@ -71,53 +80,52 @@ class Content:
         # First, read the authors from "authors", if not, fallback to "author"
         # and if not use the settings defined one, if any.
-        if not hasattr(self, 'author'):
-            if hasattr(self, 'authors'):
+        if not hasattr(self, "author"):
+            if hasattr(self, "authors"):
                 self.author = self.authors[0]
-            elif 'AUTHOR' in settings:
-                self.author = Author(settings['AUTHOR'], settings)
+            elif "AUTHOR" in settings:
+                self.author = Author(settings["AUTHOR"], settings)

-        if not hasattr(self, 'authors') and hasattr(self, 'author'):
+        if not hasattr(self, "authors") and hasattr(self, "author"):
             self.authors = [self.author]

         # XXX Split all the following code into pieces, there is too much here.

         # manage languages
         self.in_default_lang = True
-        if 'DEFAULT_LANG' in settings:
-            default_lang = settings['DEFAULT_LANG'].lower()
-            if not hasattr(self, 'lang'):
+        if "DEFAULT_LANG" in settings:
+            default_lang = settings["DEFAULT_LANG"].lower()
+            if not hasattr(self, "lang"):
                 self.lang = default_lang

-            self.in_default_lang = (self.lang == default_lang)
+            self.in_default_lang = self.lang == default_lang

         # create the slug if not existing, generate slug according to
         # setting of SLUG_ATTRIBUTE
-        if not hasattr(self, 'slug'):
-            if (settings['SLUGIFY_SOURCE'] == 'title' and
-                    hasattr(self, 'title')):
+        if not hasattr(self, "slug"):
+            if settings["SLUGIFY_SOURCE"] == "title" and hasattr(self, "title"):
                 value = self.title
-            elif (settings['SLUGIFY_SOURCE'] == 'basename' and
-                    source_path is not None):
+            elif settings["SLUGIFY_SOURCE"] == "basename" and source_path is not None:
                 value = os.path.basename(os.path.splitext(source_path)[0])
             else:
                 value = None
             if value is not None:
                 self.slug = slugify(
                     value,
-                    regex_subs=settings.get('SLUG_REGEX_SUBSTITUTIONS', []),
-                    preserve_case=settings.get('SLUGIFY_PRESERVE_CASE', False),
-                    use_unicode=settings.get('SLUGIFY_USE_UNICODE', False))
+                    regex_subs=settings.get("SLUG_REGEX_SUBSTITUTIONS", []),
+                    preserve_case=settings.get("SLUGIFY_PRESERVE_CASE", False),
+                    use_unicode=settings.get("SLUGIFY_USE_UNICODE", False),
+                )

         self.source_path = source_path
         self.relative_source_path = self.get_relative_source_path()

         # manage the date format
-        if not hasattr(self, 'date_format'):
-            if hasattr(self, 'lang') and self.lang in settings['DATE_FORMATS']:
-                self.date_format = settings['DATE_FORMATS'][self.lang]
+        if not hasattr(self, "date_format"):
+            if hasattr(self, "lang") and self.lang in settings["DATE_FORMATS"]:
+                self.date_format = settings["DATE_FORMATS"][self.lang]
             else:
-                self.date_format = settings['DEFAULT_DATE_FORMAT']
+                self.date_format = settings["DEFAULT_DATE_FORMAT"]

         if isinstance(self.date_format, tuple):
             locale_string = self.date_format[0]
@@ -129,22 +137,22 @@ class Content:
         timezone = getattr(self, "timezone", default_timezone)
         self.timezone = ZoneInfo(timezone)

-        if hasattr(self, 'date'):
+        if hasattr(self, "date"):
             self.date = set_date_tzinfo(self.date, timezone)
             self.locale_date = self.date.strftime(self.date_format)

-        if hasattr(self, 'modified'):
+        if hasattr(self, "modified"):
             self.modified = set_date_tzinfo(self.modified, timezone)
             self.locale_modified = self.modified.strftime(self.date_format)

         # manage status
-        if not hasattr(self, 'status'):
+        if not hasattr(self, "status"):
             # Previous default of None broke comment plugins and perhaps others
-            self.status = getattr(self, 'default_status', '')
+            self.status = getattr(self, "default_status", "")

         # store the summary metadata if it is set
-        if 'summary' in metadata:
-            self._summary = metadata['summary']
+        if "summary" in metadata:
+            self._summary = metadata["summary"]

         signals.content_object_init.send(self)
@@ -156,8 +164,8 @@ class Content:
         for prop in self.mandatory_properties:
             if not hasattr(self, prop):
                 logger.error(
-                    "Skipping %s: could not find information about '%s'",
-                    self, prop)
+                    "Skipping %s: could not find information about '%s'", self, prop
+                )
                 return False
         return True
@@ -183,12 +191,13 @@ class Content:
         return True

     def _has_valid_status(self):
-        if hasattr(self, 'allowed_statuses'):
+        if hasattr(self, "allowed_statuses"):
             if self.status not in self.allowed_statuses:
                 logger.error(
                     "Unknown status '%s' for file %s, skipping it. (Not in %s)",
                     self.status,
-                    self, self.allowed_statuses
+                    self,
+                    self.allowed_statuses,
                 )
                 return False
@@ -198,42 +207,48 @@ class Content:
     def is_valid(self):
         """Validate Content"""
         # Use all() to not short circuit and get results of all validations
-        return all([self._has_valid_mandatory_properties(),
-                    self._has_valid_save_as(),
-                    self._has_valid_status()])
+        return all(
+            [
+                self._has_valid_mandatory_properties(),
+                self._has_valid_save_as(),
+                self._has_valid_status(),
+            ]
+        )

     @property
     def url_format(self):
         """Returns the URL, formatted with the proper values"""
         metadata = copy.copy(self.metadata)
-        path = self.metadata.get('path', self.get_relative_source_path())
-        metadata.update({
-            'path': path_to_url(path),
-            'slug': getattr(self, 'slug', ''),
-            'lang': getattr(self, 'lang', 'en'),
-            'date': getattr(self, 'date', datetime.datetime.now()),
-            'author': self.author.slug if hasattr(self, 'author') else '',
-            'category': self.category.slug if hasattr(self, 'category') else ''
-        })
+        path = self.metadata.get("path", self.get_relative_source_path())
+        metadata.update(
+            {
+                "path": path_to_url(path),
+                "slug": getattr(self, "slug", ""),
+                "lang": getattr(self, "lang", "en"),
+                "date": getattr(self, "date", datetime.datetime.now()),
+                "author": self.author.slug if hasattr(self, "author") else "",
+                "category": self.category.slug if hasattr(self, "category") else "",
+            }
+        )
         return metadata

     def _expand_settings(self, key, klass=None):
         if not klass:
             klass = self.__class__.__name__
-        fq_key = ('{}_{}'.format(klass, key)).upper()
+        fq_key = ("{}_{}".format(klass, key)).upper()
         return str(self.settings[fq_key]).format(**self.url_format)

     def get_url_setting(self, key):
-        if hasattr(self, 'override_' + key):
-            return getattr(self, 'override_' + key)
-        key = key if self.in_default_lang else 'lang_%s' % key
+        if hasattr(self, "override_" + key):
+            return getattr(self, "override_" + key)
+        key = key if self.in_default_lang else "lang_%s" % key
         return self._expand_settings(key)

     def _link_replacer(self, siteurl, m):
-        what = m.group('what')
-        value = urlparse(m.group('value'))
+        what = m.group("what")
+        value = urlparse(m.group("value"))
         path = value.path
-        origin = m.group('path')
+        origin = m.group("path")

         # urllib.parse.urljoin() produces `a.html` for urljoin("..", "a.html")
         # so if RELATIVE_URLS are enabled, we fall back to os.path.join() to
@@ -241,7 +256,7 @@ class Content:
         # `baz/http://foo/bar.html` for join("baz", "http://foo/bar.html")
         # instead of correct "http://foo/bar.html", so one has to pick a side
         # as there is no silver bullet.
-        if self.settings['RELATIVE_URLS']:
+        if self.settings["RELATIVE_URLS"]:
             joiner = os.path.join
         else:
             joiner = urljoin
@@ -251,16 +266,17 @@ class Content:
         # os.path.join()), so in order to get a correct answer one needs to
         # append a trailing slash to siteurl in that case. This also makes
         # the new behavior fully compatible with Pelican 3.7.1.
-        if not siteurl.endswith('/'):
-            siteurl += '/'
+        if not siteurl.endswith("/"):
+            siteurl += "/"

         # XXX Put this in a different location.
-        if what in {'filename', 'static', 'attach'}:
+        if what in {"filename", "static", "attach"}:
+
             def _get_linked_content(key, url):
                 nonlocal value

                 def _find_path(path):
-                    if path.startswith('/'):
+                    if path.startswith("/"):
                         path = path[1:]
                     else:
                         # relative to the source path of this content
@@ -287,59 +303,64 @@ class Content:
                 return result

             # check if a static file is linked with {filename}
-            if what == 'filename' and key == 'generated_content':
-                linked_content = _get_linked_content('static_content', value)
+            if what == "filename" and key == "generated_content":
+                linked_content = _get_linked_content("static_content", value)
                 if linked_content:
                     logger.warning(
-                        '{filename} used for linking to static'
-                        ' content %s in %s. Use {static} instead',
+                        "{filename} used for linking to static"
+                        " content %s in %s. Use {static} instead",
                         value.path,
-                        self.get_relative_source_path())
+                        self.get_relative_source_path(),
+                    )
                     return linked_content
                 return None

-            if what == 'filename':
-                key = 'generated_content'
+            if what == "filename":
+                key = "generated_content"
             else:
-                key = 'static_content'
+                key = "static_content"

             linked_content = _get_linked_content(key, value)
             if linked_content:
-                if what == 'attach':
+                if what == "attach":
                     linked_content.attach_to(self)
                 origin = joiner(siteurl, linked_content.url)
-                origin = origin.replace('\\', '/')  # for Windows paths.
+                origin = origin.replace("\\", "/")  # for Windows paths.
             else:
                 logger.warning(
                     "Unable to find '%s', skipping url replacement.",
-                    value.geturl(), extra={
-                        'limit_msg': ("Other resources were not found "
-                                      "and their urls not replaced")})
-        elif what == 'category':
+                    value.geturl(),
+                    extra={
+                        "limit_msg": (
+                            "Other resources were not found "
+                            "and their urls not replaced"
+                        )
+                    },
+                )
+        elif what == "category":
             origin = joiner(siteurl, Category(path, self.settings).url)
-        elif what == 'tag':
+        elif what == "tag":
             origin = joiner(siteurl, Tag(path, self.settings).url)
-        elif what == 'index':
-            origin = joiner(siteurl, self.settings['INDEX_SAVE_AS'])
-        elif what == 'author':
+        elif what == "index":
+            origin = joiner(siteurl, self.settings["INDEX_SAVE_AS"])
+        elif what == "author":
             origin = joiner(siteurl, Author(path, self.settings).url)
         else:
             logger.warning(
-                "Replacement Indicator '%s' not recognized, "
-                "skipping replacement",
-                what)
+                "Replacement Indicator '%s' not recognized, " "skipping replacement",
+                what,
+            )

         # keep all other parts, such as query, fragment, etc.
         parts = list(value)
         parts[2] = origin
         origin = urlunparse(parts)

-        return ''.join((m.group('markup'), m.group('quote'), origin,
-                        m.group('quote')))
+        return "".join((m.group("markup"), m.group("quote"), origin, m.group("quote")))

     def _get_intrasite_link_regex(self):
-        intrasite_link_regex = self.settings['INTRASITE_LINK_REGEX']
+        intrasite_link_regex = self.settings["INTRASITE_LINK_REGEX"]
         regex = r"""
             (?P<markup><[^\>]+  # match tag with all url-value attributes
             (?:href|src|poster|data|cite|formaction|action|content)\s*=\s*)
@@ -369,28 +390,28 @@ class Content:
         static_links = set()
         hrefs = self._get_intrasite_link_regex()
         for m in hrefs.finditer(self._content):
-            what = m.group('what')
-            value = urlparse(m.group('value'))
+            what = m.group("what")
+            value = urlparse(m.group("value"))
             path = value.path
-            if what not in {'static', 'attach'}:
+            if what not in {"static", "attach"}:
                 continue
-            if path.startswith('/'):
+            if path.startswith("/"):
                 path = path[1:]
             else:
                 # relative to the source path of this content
                 path = self.get_relative_source_path(
                     os.path.join(self.relative_dir, path)
                 )
-            path = path.replace('%20', ' ')
+            path = path.replace("%20", " ")
             static_links.add(path)

         return static_links

     def get_siteurl(self):
-        return self._context.get('localsiteurl', '')
+        return self._context.get("localsiteurl", "")

     @memoized
     def get_content(self, siteurl):
-        if hasattr(self, '_get_content'):
+        if hasattr(self, "_get_content"):
             content = self._get_content()
         else:
             content = self._content
@@ -407,15 +428,17 @@ class Content:
         This is based on the summary metadata if set, otherwise truncate the
         content.
         """
-        if 'summary' in self.metadata:
-            return self.metadata['summary']
+        if "summary" in self.metadata:
+            return self.metadata["summary"]

-        if self.settings['SUMMARY_MAX_LENGTH'] is None:
+        if self.settings["SUMMARY_MAX_LENGTH"] is None:
             return self.content

-        return truncate_html_words(self.content,
-                                   self.settings['SUMMARY_MAX_LENGTH'],
-                                   self.settings['SUMMARY_END_SUFFIX'])
+        return truncate_html_words(
+            self.content,
+            self.settings["SUMMARY_MAX_LENGTH"],
+            self.settings["SUMMARY_END_SUFFIX"],
+        )

     @property
     def summary(self):
@@ -424,8 +447,10 @@ class Content:

     def _get_summary(self):
         """deprecated function to access summary"""
-        logger.warning('_get_summary() has been deprecated since 3.6.4. '
-                       'Use the summary decorator instead')
+        logger.warning(
+            "_get_summary() has been deprecated since 3.6.4. "
+            "Use the summary decorator instead"
+        )
         return self.summary

     @summary.setter
@@ -444,14 +469,14 @@ class Content:

     @property
     def url(self):
-        return self.get_url_setting('url')
+        return self.get_url_setting("url")

     @property
     def save_as(self):
-        return self.get_url_setting('save_as')
+        return self.get_url_setting("save_as")

     def _get_template(self):
-        if hasattr(self, 'template') and self.template is not None:
+        if hasattr(self, "template") and self.template is not None:
             return self.template
         else:
             return self.default_template
@@ -470,11 +495,10 @@ class Content:
         return posixize_path(
             os.path.relpath(
-                os.path.abspath(os.path.join(
-                    self.settings['PATH'],
-                    source_path)),
-                os.path.abspath(self.settings['PATH'])
-            ))
+                os.path.abspath(os.path.join(self.settings["PATH"], source_path)),
+                os.path.abspath(self.settings["PATH"]),
+            )
+        )

     @property
     def relative_dir(self):
@ -482,85 +506,84 @@ class Content:
os.path.dirname( os.path.dirname(
os.path.relpath( os.path.relpath(
os.path.abspath(self.source_path), os.path.abspath(self.source_path),
os.path.abspath(self.settings['PATH'])))) os.path.abspath(self.settings["PATH"]),
)
)
)
def refresh_metadata_intersite_links(self): def refresh_metadata_intersite_links(self):
for key in self.settings['FORMATTED_FIELDS']: for key in self.settings["FORMATTED_FIELDS"]:
if key in self.metadata and key != 'summary': if key in self.metadata and key != "summary":
value = self._update_content( value = self._update_content(self.metadata[key], self.get_siteurl())
self.metadata[key],
self.get_siteurl()
)
self.metadata[key] = value self.metadata[key] = value
setattr(self, key.lower(), value) setattr(self, key.lower(), value)
# _summary is an internal variable that some plugins may be writing to, # _summary is an internal variable that some plugins may be writing to,
# so ensure changes to it are picked up # so ensure changes to it are picked up
if ('summary' in self.settings['FORMATTED_FIELDS'] and if (
'summary' in self.metadata): "summary" in self.settings["FORMATTED_FIELDS"]
self._summary = self._update_content( and "summary" in self.metadata
self._summary, ):
self.get_siteurl() self._summary = self._update_content(self._summary, self.get_siteurl())
) self.metadata["summary"] = self._summary
self.metadata['summary'] = self._summary
class Page(Content): class Page(Content):
mandatory_properties = ('title',) mandatory_properties = ("title",)
allowed_statuses = ('published', 'hidden', 'draft') allowed_statuses = ("published", "hidden", "draft")
default_status = 'published' default_status = "published"
default_template = 'page' default_template = "page"
def _expand_settings(self, key): def _expand_settings(self, key):
klass = 'draft_page' if self.status == 'draft' else None klass = "draft_page" if self.status == "draft" else None
return super()._expand_settings(key, klass) return super()._expand_settings(key, klass)
class Article(Content): class Article(Content):
mandatory_properties = ('title', 'date', 'category') mandatory_properties = ("title", "date", "category")
allowed_statuses = ('published', 'hidden', 'draft') allowed_statuses = ("published", "hidden", "draft")
default_status = 'published' default_status = "published"
default_template = 'article' default_template = "article"
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs) super().__init__(*args, **kwargs)
# handle WITH_FUTURE_DATES (designate article to draft based on date) # handle WITH_FUTURE_DATES (designate article to draft based on date)
if not self.settings['WITH_FUTURE_DATES'] and hasattr(self, 'date'): if not self.settings["WITH_FUTURE_DATES"] and hasattr(self, "date"):
if self.date.tzinfo is None: if self.date.tzinfo is None:
now = datetime.datetime.now() now = datetime.datetime.now()
else: else:
now = datetime.datetime.utcnow().replace(tzinfo=timezone.utc) now = datetime.datetime.utcnow().replace(tzinfo=timezone.utc)
if self.date > now: if self.date > now:
self.status = 'draft' self.status = "draft"
# if we are a draft and there is no date provided, set max datetime # if we are a draft and there is no date provided, set max datetime
if not hasattr(self, 'date') and self.status == 'draft': if not hasattr(self, "date") and self.status == "draft":
self.date = datetime.datetime.max.replace(tzinfo=self.timezone) self.date = datetime.datetime.max.replace(tzinfo=self.timezone)
def _expand_settings(self, key): def _expand_settings(self, key):
klass = 'draft' if self.status == 'draft' else 'article' klass = "draft" if self.status == "draft" else "article"
return super()._expand_settings(key, klass) return super()._expand_settings(key, klass)
class Static(Content): class Static(Content):
mandatory_properties = ('title',) mandatory_properties = ("title",)
default_status = 'published' default_status = "published"
default_template = None default_template = None
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs) super().__init__(*args, **kwargs)
self._output_location_referenced = False self._output_location_referenced = False
@deprecated_attribute(old='filepath', new='source_path', since=(3, 2, 0)) @deprecated_attribute(old="filepath", new="source_path", since=(3, 2, 0))
def filepath(): def filepath():
return None return None
@deprecated_attribute(old='src', new='source_path', since=(3, 2, 0)) @deprecated_attribute(old="src", new="source_path", since=(3, 2, 0))
def src(): def src():
return None return None
@deprecated_attribute(old='dst', new='save_as', since=(3, 2, 0)) @deprecated_attribute(old="dst", new="save_as", since=(3, 2, 0))
def dst(): def dst():
return None return None
@ -577,8 +600,7 @@ class Static(Content):
return super().save_as return super().save_as
def attach_to(self, content): def attach_to(self, content):
"""Override our output directory with that of the given content object. """Override our output directory with that of the given content object."""
"""
# Determine our file's new output path relative to the linking # Determine our file's new output path relative to the linking
# document. If it currently lives beneath the linking # document. If it currently lives beneath the linking
@ -589,8 +611,7 @@ class Static(Content):
tail_path = os.path.relpath(self.source_path, linking_source_dir) tail_path = os.path.relpath(self.source_path, linking_source_dir)
if tail_path.startswith(os.pardir + os.sep): if tail_path.startswith(os.pardir + os.sep):
tail_path = os.path.basename(tail_path) tail_path = os.path.basename(tail_path)
new_save_as = os.path.join( new_save_as = os.path.join(os.path.dirname(content.save_as), tail_path)
os.path.dirname(content.save_as), tail_path)
# We do not build our new url by joining tail_path with the linking # We do not build our new url by joining tail_path with the linking
# document's url, because we cannot know just by looking at the latter # document's url, because we cannot know just by looking at the latter
@@ -609,12 +630,14 @@ class Static(Content):
"%s because %s. Falling back to " "%s because %s. Falling back to "
"{filename} link behavior instead.", "{filename} link behavior instead.",
content.get_relative_source_path(), content.get_relative_source_path(),
self.get_relative_source_path(), reason, self.get_relative_source_path(),
extra={'limit_msg': "More {attach} warnings silenced."}) reason,
extra={"limit_msg": "More {attach} warnings silenced."},
)
# We never override an override, because we don't want to interfere # We never override an override, because we don't want to interfere
# with user-defined overrides that might be in EXTRA_PATH_METADATA. # with user-defined overrides that might be in EXTRA_PATH_METADATA.
if hasattr(self, 'override_save_as') or hasattr(self, 'override_url'): if hasattr(self, "override_save_as") or hasattr(self, "override_url"):
if new_save_as != self.save_as or new_url != self.url: if new_save_as != self.save_as or new_url != self.url:
_log_reason("its output location was already overridden") _log_reason("its output location was already overridden")
return return
File diff suppressed because it is too large.

View file
@@ -4,9 +4,7 @@ from collections import defaultdict
from rich.console import Console from rich.console import Console
from rich.logging import RichHandler from rich.logging import RichHandler
__all__ = [ __all__ = ["init"]
'init'
]
console = Console() console = Console()
@@ -34,8 +32,8 @@ class LimitFilter(logging.Filter):
return True return True
# extract group # extract group
group = record.__dict__.get('limit_msg', None) group = record.__dict__.get("limit_msg", None)
group_args = record.__dict__.get('limit_args', ()) group_args = record.__dict__.get("limit_args", ())
# ignore record if it was already raised # ignore record if it was already raised
message_key = (record.levelno, record.getMessage()) message_key = (record.levelno, record.getMessage())
@@ -50,7 +48,7 @@ class LimitFilter(logging.Filter):
if logger_level > logging.DEBUG: if logger_level > logging.DEBUG:
template_key = (record.levelno, record.msg) template_key = (record.levelno, record.msg)
message_key = (record.levelno, record.getMessage()) message_key = (record.levelno, record.getMessage())
if (template_key in self._ignore or message_key in self._ignore): if template_key in self._ignore or message_key in self._ignore:
return False return False
# check if we went over threshold # check if we went over threshold
@@ -90,12 +88,12 @@ class FatalLogger(LimitLogger):
def warning(self, *args, **kwargs): def warning(self, *args, **kwargs):
super().warning(*args, **kwargs) super().warning(*args, **kwargs)
if FatalLogger.warnings_fatal: if FatalLogger.warnings_fatal:
raise RuntimeError('Warning encountered') raise RuntimeError("Warning encountered")
def error(self, *args, **kwargs): def error(self, *args, **kwargs):
super().error(*args, **kwargs) super().error(*args, **kwargs)
if FatalLogger.errors_fatal: if FatalLogger.errors_fatal:
raise RuntimeError('Error encountered') raise RuntimeError("Error encountered")
logging.setLoggerClass(FatalLogger) logging.setLoggerClass(FatalLogger)
@@ -103,17 +101,19 @@ logging.setLoggerClass(FatalLogger)
logging.getLogger().__class__ = FatalLogger logging.getLogger().__class__ = FatalLogger
def init(level=None, fatal='', handler=RichHandler(console=console), name=None, def init(
logs_dedup_min_level=None): level=None,
FatalLogger.warnings_fatal = fatal.startswith('warning') fatal="",
handler=RichHandler(console=console),
name=None,
logs_dedup_min_level=None,
):
FatalLogger.warnings_fatal = fatal.startswith("warning")
FatalLogger.errors_fatal = bool(fatal) FatalLogger.errors_fatal = bool(fatal)
LOG_FORMAT = "%(message)s" LOG_FORMAT = "%(message)s"
logging.basicConfig( logging.basicConfig(
level=level, level=level, format=LOG_FORMAT, datefmt="[%H:%M:%S]", handlers=[handler]
format=LOG_FORMAT,
datefmt="[%H:%M:%S]",
handlers=[handler]
) )
logger = logging.getLogger(name) logger = logging.getLogger(name)
@@ -126,17 +126,18 @@ def init(level=None, fatal='', handler=RichHandler(console=console), name=None,
def log_warnings(): def log_warnings():
import warnings import warnings
logging.captureWarnings(True) logging.captureWarnings(True)
warnings.simplefilter("default", DeprecationWarning) warnings.simplefilter("default", DeprecationWarning)
init(logging.DEBUG, name='py.warnings') init(logging.DEBUG, name="py.warnings")
if __name__ == '__main__': if __name__ == "__main__":
init(level=logging.DEBUG, name=__name__) init(level=logging.DEBUG, name=__name__)
root_logger = logging.getLogger(__name__) root_logger = logging.getLogger(__name__)
root_logger.debug('debug') root_logger.debug("debug")
root_logger.info('info') root_logger.info("info")
root_logger.warning('warning') root_logger.warning("warning")
root_logger.error('error') root_logger.error("error")
root_logger.critical('critical') root_logger.critical("critical")
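
The fatal-logging machinery reformatted above is simple but easy to misread in diff form: init() sets two class-level flags, and the overridden warning()/error() methods raise after delegating to the normal logging path. The same pattern in a self-contained sketch (StrictLogger is a hypothetical stand-in for FatalLogger):

    import logging

    class StrictLogger(logging.Logger):
        # class-level flag, as in FatalLogger: one switch affects every logger
        warnings_fatal = False

        def warning(self, *args, **kwargs):
            super().warning(*args, **kwargs)   # log first, then escalate
            if StrictLogger.warnings_fatal:
                raise RuntimeError("Warning encountered")

    logging.setLoggerClass(StrictLogger)       # must run before getLogger()
    log = logging.getLogger("demo")
    StrictLogger.warnings_fatal = True
    try:
        log.warning("boom")
    except RuntimeError as exc:
        print(exc)                             # Warning encountered
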
View file
@@ -6,8 +6,8 @@ from math import ceil
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
PaginationRule = namedtuple( PaginationRule = namedtuple(
'PaginationRule', "PaginationRule",
'min_page URL SAVE_AS', "min_page URL SAVE_AS",
) )
@@ -19,7 +19,7 @@ class Paginator:
self.settings = settings self.settings = settings
if per_page: if per_page:
self.per_page = per_page self.per_page = per_page
self.orphans = settings['DEFAULT_ORPHANS'] self.orphans = settings["DEFAULT_ORPHANS"]
else: else:
self.per_page = len(object_list) self.per_page = len(object_list)
self.orphans = 0 self.orphans = 0
@@ -32,14 +32,21 @@ class Paginator:
top = bottom + self.per_page top = bottom + self.per_page
if top + self.orphans >= self.count: if top + self.orphans >= self.count:
top = self.count top = self.count
return Page(self.name, self.url, self.object_list[bottom:top], number, return Page(
self, self.settings) self.name,
self.url,
self.object_list[bottom:top],
number,
self,
self.settings,
)
def _get_count(self): def _get_count(self):
"Returns the total number of objects, across all pages." "Returns the total number of objects, across all pages."
if self._count is None: if self._count is None:
self._count = len(self.object_list) self._count = len(self.object_list)
return self._count return self._count
count = property(_get_count) count = property(_get_count)
def _get_num_pages(self): def _get_num_pages(self):
@@ -48,6 +55,7 @@ class Paginator:
hits = max(1, self.count - self.orphans) hits = max(1, self.count - self.orphans)
self._num_pages = int(ceil(hits / (float(self.per_page) or 1))) self._num_pages = int(ceil(hits / (float(self.per_page) or 1)))
return self._num_pages return self._num_pages
num_pages = property(_get_num_pages) num_pages = property(_get_num_pages)
def _get_page_range(self): def _get_page_range(self):
@@ -56,6 +64,7 @@ class Paginator:
a template for loop. a template for loop.
""" """
return list(range(1, self.num_pages + 1)) return list(range(1, self.num_pages + 1))
page_range = property(_get_page_range) page_range = property(_get_page_range)
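
The arithmetic that gained blank lines above rewards a worked example: num_pages is ceil(max(1, count - orphans) / per_page), so up to DEFAULT_ORPHANS trailing items are absorbed into the previous page instead of forming a short final one. For instance:

    from math import ceil

    def num_pages(count, per_page, orphans=0):
        # mirror of Paginator._get_num_pages above
        hits = max(1, count - orphans)
        return int(ceil(hits / (float(per_page) or 1)))

    print(num_pages(11, 5))             # 3 -> pages of 5, 5 and 1
    print(num_pages(11, 5, orphans=1))  # 2 -> the lone item joins the second page
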
@@ -64,7 +73,7 @@ class Page:
self.full_name = name self.full_name = name
self.name, self.extension = os.path.splitext(name) self.name, self.extension = os.path.splitext(name)
dn, fn = os.path.split(name) dn, fn = os.path.split(name)
self.base_name = dn if fn in ('index.htm', 'index.html') else self.name self.base_name = dn if fn in ("index.htm", "index.html") else self.name
self.base_url = url self.base_url = url
self.object_list = object_list self.object_list = object_list
self.number = number self.number = number
@@ -72,7 +81,7 @@ class Page:
self.settings = settings self.settings = settings
def __repr__(self): def __repr__(self):
return '<Page {} of {}>'.format(self.number, self.paginator.num_pages) return "<Page {} of {}>".format(self.number, self.paginator.num_pages)
def has_next(self): def has_next(self):
return self.number < self.paginator.num_pages return self.number < self.paginator.num_pages
@@ -117,7 +126,7 @@ class Page:
rule = None rule = None
# find the last matching pagination rule # find the last matching pagination rule
for p in self.settings['PAGINATION_PATTERNS']: for p in self.settings["PAGINATION_PATTERNS"]:
if p.min_page == -1: if p.min_page == -1:
if not self.has_next(): if not self.has_next():
rule = p rule = p
@@ -127,22 +136,22 @@ class Page:
rule = p rule = p
if not rule: if not rule:
return '' return ""
prop_value = getattr(rule, key) prop_value = getattr(rule, key)
if not isinstance(prop_value, str): if not isinstance(prop_value, str):
logger.warning('%s is set to %s', key, prop_value) logger.warning("%s is set to %s", key, prop_value)
return prop_value return prop_value
# URL or SAVE_AS is a string, format it with a controlled context # URL or SAVE_AS is a string, format it with a controlled context
context = { context = {
'save_as': self.full_name, "save_as": self.full_name,
'url': self.base_url, "url": self.base_url,
'name': self.name, "name": self.name,
'base_name': self.base_name, "base_name": self.base_name,
'extension': self.extension, "extension": self.extension,
'number': self.number, "number": self.number,
} }
ret = prop_value.format(**context) ret = prop_value.format(**context)
@@ -155,9 +164,9 @@ class Page:
# changed to lstrip() because that would remove all leading slashes and # changed to lstrip() because that would remove all leading slashes and
# thus make the workaround impossible. See # thus make the workaround impossible. See
# test_custom_pagination_pattern() for a verification of this. # test_custom_pagination_pattern() for a verification of this.
if ret.startswith('/'): if ret.startswith("/"):
ret = ret[1:] ret = ret[1:]
return ret return ret
url = property(functools.partial(_from_settings, key='URL')) url = property(functools.partial(_from_settings, key="URL"))
save_as = property(functools.partial(_from_settings, key='SAVE_AS')) save_as = property(functools.partial(_from_settings, key="SAVE_AS"))
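
Each PAGINATION_PATTERNS entry is an ordinary str.format template filled from the controlled context assembled above by _from_settings. With a hypothetical pattern (not one of Pelican's defaults, purely for illustration):

    # context values as they might look for page 2 of blog/index.html
    context = {
        "save_as": "blog/index.html",
        "url": "blog/",
        "name": "blog/index",
        "base_name": "blog",
        "extension": ".html",
        "number": 2,
    }
    pattern = "{base_name}/page/{number}/"  # hypothetical URL pattern
    print(pattern.format(**context))        # blog/page/2/
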
View file
@@ -24,26 +24,26 @@ def get_namespace_plugins(ns_pkg=None):
return { return {
name: importlib.import_module(name) name: importlib.import_module(name)
for finder, name, ispkg for finder, name, ispkg in iter_namespace(ns_pkg)
in iter_namespace(ns_pkg)
if ispkg if ispkg
} }
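
iter_namespace() is defined just above this hunk; it presumably follows the standard namespace-package recipe (pkgutil.iter_modules over the package's __path__ with the package name as prefix). A sketch of the whole discovery step, assuming Pelican is installed:

    import importlib
    import pkgutil

    import pelican.plugins as ns_pkg

    def iter_namespace(ns_pkg):
        # standard namespace-package pattern; the prefix keeps names importable
        return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".")

    found = {
        name: importlib.import_module(name)
        for _, name, ispkg in iter_namespace(ns_pkg)
        if ispkg
    }
    print(sorted(found))  # e.g. ['pelican.plugins.sitemap'] if that plugin is installed
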
def list_plugins(ns_pkg=None): def list_plugins(ns_pkg=None):
from pelican.log import init as init_logging from pelican.log import init as init_logging
init_logging(logging.INFO) init_logging(logging.INFO)
ns_plugins = get_namespace_plugins(ns_pkg) ns_plugins = get_namespace_plugins(ns_pkg)
if ns_plugins: if ns_plugins:
logger.info('Plugins found:\n' + '\n'.join(ns_plugins)) logger.info("Plugins found:\n" + "\n".join(ns_plugins))
else: else:
logger.info('No plugins are installed') logger.info("No plugins are installed")
def load_legacy_plugin(plugin, plugin_paths): def load_legacy_plugin(plugin, plugin_paths):
if '.' in plugin: if "." in plugin:
# it is in a package, try to resolve package first # it is in a package, try to resolve package first
package, _, _ = plugin.rpartition('.') package, _, _ = plugin.rpartition(".")
load_legacy_plugin(package, plugin_paths) load_legacy_plugin(package, plugin_paths)
# Try to find plugin in PLUGIN_PATHS # Try to find plugin in PLUGIN_PATHS
@@ -52,7 +52,7 @@ def load_legacy_plugin(plugin, plugin_paths):
# If failed, try to find it in normal importable locations # If failed, try to find it in normal importable locations
spec = importlib.util.find_spec(plugin) spec = importlib.util.find_spec(plugin)
if spec is None: if spec is None:
raise ImportError('Cannot import plugin `{}`'.format(plugin)) raise ImportError("Cannot import plugin `{}`".format(plugin))
else: else:
# Avoid loading the same plugin twice # Avoid loading the same plugin twice
if spec.name in sys.modules: if spec.name in sys.modules:
@@ -78,30 +78,28 @@ def load_legacy_plugin(plugin, plugin_paths):
def load_plugins(settings): def load_plugins(settings):
logger.debug('Finding namespace plugins') logger.debug("Finding namespace plugins")
namespace_plugins = get_namespace_plugins() namespace_plugins = get_namespace_plugins()
if namespace_plugins: if namespace_plugins:
logger.debug('Namespace plugins found:\n' + logger.debug("Namespace plugins found:\n" + "\n".join(namespace_plugins))
'\n'.join(namespace_plugins))
plugins = [] plugins = []
if settings.get('PLUGINS') is not None: if settings.get("PLUGINS") is not None:
for plugin in settings['PLUGINS']: for plugin in settings["PLUGINS"]:
if isinstance(plugin, str): if isinstance(plugin, str):
logger.debug('Loading plugin `%s`', plugin) logger.debug("Loading plugin `%s`", plugin)
# try to find in namespace plugins # try to find in namespace plugins
if plugin in namespace_plugins: if plugin in namespace_plugins:
plugin = namespace_plugins[plugin] plugin = namespace_plugins[plugin]
elif 'pelican.plugins.{}'.format(plugin) in namespace_plugins: elif "pelican.plugins.{}".format(plugin) in namespace_plugins:
plugin = namespace_plugins['pelican.plugins.{}'.format( plugin = namespace_plugins["pelican.plugins.{}".format(plugin)]
plugin)]
# try to import it # try to import it
else: else:
try: try:
plugin = load_legacy_plugin( plugin = load_legacy_plugin(
plugin, plugin, settings.get("PLUGIN_PATHS", [])
settings.get('PLUGIN_PATHS', [])) )
except ImportError as e: except ImportError as e:
logger.error('Cannot load plugin `%s`\n%s', plugin, e) logger.error("Cannot load plugin `%s`\n%s", plugin, e)
continue continue
plugins.append(plugin) plugins.append(plugin)
else: else:
View file
@@ -2,48 +2,48 @@ from blinker import signal
# Run-level signals: # Run-level signals:
initialized = signal('pelican_initialized') initialized = signal("pelican_initialized")
get_generators = signal('get_generators') get_generators = signal("get_generators")
all_generators_finalized = signal('all_generators_finalized') all_generators_finalized = signal("all_generators_finalized")
get_writer = signal('get_writer') get_writer = signal("get_writer")
finalized = signal('pelican_finalized') finalized = signal("pelican_finalized")
# Reader-level signals # Reader-level signals
readers_init = signal('readers_init') readers_init = signal("readers_init")
# Generator-level signals # Generator-level signals
generator_init = signal('generator_init') generator_init = signal("generator_init")
article_generator_init = signal('article_generator_init') article_generator_init = signal("article_generator_init")
article_generator_pretaxonomy = signal('article_generator_pretaxonomy') article_generator_pretaxonomy = signal("article_generator_pretaxonomy")
article_generator_finalized = signal('article_generator_finalized') article_generator_finalized = signal("article_generator_finalized")
article_generator_write_article = signal('article_generator_write_article') article_generator_write_article = signal("article_generator_write_article")
article_writer_finalized = signal('article_writer_finalized') article_writer_finalized = signal("article_writer_finalized")
page_generator_init = signal('page_generator_init') page_generator_init = signal("page_generator_init")
page_generator_finalized = signal('page_generator_finalized') page_generator_finalized = signal("page_generator_finalized")
page_generator_write_page = signal('page_generator_write_page') page_generator_write_page = signal("page_generator_write_page")
page_writer_finalized = signal('page_writer_finalized') page_writer_finalized = signal("page_writer_finalized")
static_generator_init = signal('static_generator_init') static_generator_init = signal("static_generator_init")
static_generator_finalized = signal('static_generator_finalized') static_generator_finalized = signal("static_generator_finalized")
# Page-level signals # Page-level signals
article_generator_preread = signal('article_generator_preread') article_generator_preread = signal("article_generator_preread")
article_generator_context = signal('article_generator_context') article_generator_context = signal("article_generator_context")
page_generator_preread = signal('page_generator_preread') page_generator_preread = signal("page_generator_preread")
page_generator_context = signal('page_generator_context') page_generator_context = signal("page_generator_context")
static_generator_preread = signal('static_generator_preread') static_generator_preread = signal("static_generator_preread")
static_generator_context = signal('static_generator_context') static_generator_context = signal("static_generator_context")
content_object_init = signal('content_object_init') content_object_init = signal("content_object_init")
# Writers signals # Writers signals
content_written = signal('content_written') content_written = signal("content_written")
feed_generated = signal('feed_generated') feed_generated = signal("feed_generated")
feed_written = signal('feed_written') feed_written = signal("feed_written")
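
All of these are blinker named signals, so a plugin can connect to the same object either by importing it or by looking the name up. A minimal receiver:

    from blinker import signal

    initialized = signal("pelican_initialized")  # same NamedSignal as above

    def on_initialized(sender):
        # sender is the Pelican instance when emitted by Pelican itself
        print("site initialized by", sender)

    initialized.connect(on_initialized)
    initialized.send("demo-sender")  # prints: site initialized by demo-sender
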
View file
@@ -31,33 +31,29 @@ except ImportError:
_DISCARD = object() _DISCARD = object()
DUPLICATES_DEFINITIONS_ALLOWED = { DUPLICATES_DEFINITIONS_ALLOWED = {
'tags': False, "tags": False,
'date': False, "date": False,
'modified': False, "modified": False,
'status': False, "status": False,
'category': False, "category": False,
'author': False, "author": False,
'save_as': False, "save_as": False,
'url': False, "url": False,
'authors': False, "authors": False,
'slug': False "slug": False,
} }
METADATA_PROCESSORS = { METADATA_PROCESSORS = {
'tags': lambda x, y: ([ "tags": lambda x, y: ([Tag(tag, y) for tag in ensure_metadata_list(x)] or _DISCARD),
Tag(tag, y) "date": lambda x, y: get_date(x.replace("_", " ")),
for tag in ensure_metadata_list(x) "modified": lambda x, y: get_date(x),
] or _DISCARD), "status": lambda x, y: x.strip() or _DISCARD,
'date': lambda x, y: get_date(x.replace('_', ' ')), "category": lambda x, y: _process_if_nonempty(Category, x, y),
'modified': lambda x, y: get_date(x), "author": lambda x, y: _process_if_nonempty(Author, x, y),
'status': lambda x, y: x.strip() or _DISCARD, "authors": lambda x, y: (
'category': lambda x, y: _process_if_nonempty(Category, x, y), [Author(author, y) for author in ensure_metadata_list(x)] or _DISCARD
'author': lambda x, y: _process_if_nonempty(Author, x, y), ),
'authors': lambda x, y: ([ "slug": lambda x, y: x.strip() or _DISCARD,
Author(author, y)
for author in ensure_metadata_list(x)
] or _DISCARD),
'slug': lambda x, y: x.strip() or _DISCARD,
} }
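
Every processor in this table is a two-argument callable taking the raw value and the settings, and returning either the parsed value or the _DISCARD sentinel so empty values can be dropped. A stripped-down sketch of how a reader applies one (hypothetical one-entry registry):

    _DISCARD = object()  # sentinel, as above

    PROCESSORS = {
        "status": lambda x, y: x.strip() or _DISCARD,
    }

    def process_metadata(name, value, settings=None):
        if name in PROCESSORS:
            return PROCESSORS[name](value, settings)
        return value

    print(process_metadata("status", " draft "))          # 'draft'
    print(process_metadata("status", "   ") is _DISCARD)  # True: dropped later
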
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -65,25 +61,23 @@ logger = logging.getLogger(__name__)
def ensure_metadata_list(text): def ensure_metadata_list(text):
"""Canonicalize the format of a list of authors or tags. This works """Canonicalize the format of a list of authors or tags. This works
the same way as Docutils' "authors" field: if it's already a list, the same way as Docutils' "authors" field: if it's already a list,
those boundaries are preserved; otherwise, it must be a string; those boundaries are preserved; otherwise, it must be a string;
if the string contains semicolons, it is split on semicolons; if the string contains semicolons, it is split on semicolons;
otherwise, it is split on commas. This allows you to write otherwise, it is split on commas. This allows you to write
author lists in either "Jane Doe, John Doe" or "Doe, Jane; Doe, John" author lists in either "Jane Doe, John Doe" or "Doe, Jane; Doe, John"
format. format.
Regardless, all list items undergo .strip() before returning, and Regardless, all list items undergo .strip() before returning, and
empty items are discarded. empty items are discarded.
""" """
if isinstance(text, str): if isinstance(text, str):
if ';' in text: if ";" in text:
text = text.split(';') text = text.split(";")
else: else:
text = text.split(',') text = text.split(",")
return list(OrderedDict.fromkeys( return list(OrderedDict.fromkeys([v for v in (w.strip() for w in text) if v]))
[v for v in (w.strip() for w in text) if v]
))
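
Squeezed onto one line, the de-duplication expression is easy to skim past; here is the docstring's contract exercised in two calls (same logic, copied out of the diff):

    from collections import OrderedDict

    def ensure_metadata_list(text):
        # semicolons win over commas; items stripped, blanks dropped,
        # order-preserving de-duplication via OrderedDict keys
        if isinstance(text, str):
            text = text.split(";") if ";" in text else text.split(",")
        return list(OrderedDict.fromkeys([v for v in (w.strip() for w in text) if v]))

    print(ensure_metadata_list("Jane Doe, John Doe"))    # ['Jane Doe', 'John Doe']
    print(ensure_metadata_list("Doe, Jane; Doe, John"))  # ['Doe, Jane', 'Doe, John']
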
def _process_if_nonempty(processor, name, settings): def _process_if_nonempty(processor, name, settings):
@@ -112,8 +106,9 @@ class BaseReader:
Markdown). Markdown).
""" """
enabled = True enabled = True
file_extensions = ['static'] file_extensions = ["static"]
extensions = None extensions = None
def __init__(self, settings): def __init__(self, settings):
@@ -132,13 +127,12 @@ class BaseReader:
class _FieldBodyTranslator(HTMLTranslator): class _FieldBodyTranslator(HTMLTranslator):
def __init__(self, document): def __init__(self, document):
super().__init__(document) super().__init__(document)
self.compact_p = None self.compact_p = None
def astext(self): def astext(self):
return ''.join(self.body) return "".join(self.body)
def visit_field_body(self, node): def visit_field_body(self, node):
pass pass
@@ -154,27 +148,25 @@ def render_node_to_html(document, node, field_body_translator_class):
class PelicanHTMLWriter(Writer): class PelicanHTMLWriter(Writer):
def __init__(self): def __init__(self):
super().__init__() super().__init__()
self.translator_class = PelicanHTMLTranslator self.translator_class = PelicanHTMLTranslator
class PelicanHTMLTranslator(HTMLTranslator): class PelicanHTMLTranslator(HTMLTranslator):
def visit_abbreviation(self, node): def visit_abbreviation(self, node):
attrs = {} attrs = {}
if node.hasattr('explanation'): if node.hasattr("explanation"):
attrs['title'] = node['explanation'] attrs["title"] = node["explanation"]
self.body.append(self.starttag(node, 'abbr', '', **attrs)) self.body.append(self.starttag(node, "abbr", "", **attrs))
def depart_abbreviation(self, node): def depart_abbreviation(self, node):
self.body.append('</abbr>') self.body.append("</abbr>")
def visit_image(self, node): def visit_image(self, node):
# set an empty alt if alt is not specified # set an empty alt if alt is not specified
# avoids that alt is taken from src # avoids that alt is taken from src
node['alt'] = node.get('alt', '') node["alt"] = node.get("alt", "")
return HTMLTranslator.visit_image(self, node) return HTMLTranslator.visit_image(self, node)
@@ -194,7 +186,7 @@ class RstReader(BaseReader):
""" """
enabled = bool(docutils) enabled = bool(docutils)
file_extensions = ['rst'] file_extensions = ["rst"]
writer_class = PelicanHTMLWriter writer_class = PelicanHTMLWriter
field_body_translator_class = _FieldBodyTranslator field_body_translator_class = _FieldBodyTranslator
@@ -202,25 +194,28 @@ class RstReader(BaseReader):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs) super().__init__(*args, **kwargs)
lang_code = self.settings.get('DEFAULT_LANG', 'en') lang_code = self.settings.get("DEFAULT_LANG", "en")
if get_docutils_lang(lang_code): if get_docutils_lang(lang_code):
self._language_code = lang_code self._language_code = lang_code
else: else:
logger.warning("Docutils has no localization for '%s'." logger.warning(
" Using 'en' instead.", lang_code) "Docutils has no localization for '%s'." " Using 'en' instead.",
self._language_code = 'en' lang_code,
)
self._language_code = "en"
def _parse_metadata(self, document, source_path): def _parse_metadata(self, document, source_path):
"""Return the dict containing document metadata""" """Return the dict containing document metadata"""
formatted_fields = self.settings['FORMATTED_FIELDS'] formatted_fields = self.settings["FORMATTED_FIELDS"]
output = {} output = {}
if document.first_child_matching_class(docutils.nodes.title) is None: if document.first_child_matching_class(docutils.nodes.title) is None:
logger.warning( logger.warning(
'Document title missing in file %s: ' "Document title missing in file %s: "
'Ensure exactly one top level section', "Ensure exactly one top level section",
source_path) source_path,
)
try: try:
# docutils 0.18.1+ # docutils 0.18.1+
@@ -231,16 +226,16 @@ class RstReader(BaseReader):
for docinfo in nodes: for docinfo in nodes:
for element in docinfo.children: for element in docinfo.children:
if element.tagname == 'field': # custom fields (e.g. summary) if element.tagname == "field": # custom fields (e.g. summary)
name_elem, body_elem = element.children name_elem, body_elem = element.children
name = name_elem.astext() name = name_elem.astext()
if name.lower() in formatted_fields: if name.lower() in formatted_fields:
value = render_node_to_html( value = render_node_to_html(
document, body_elem, document, body_elem, self.field_body_translator_class
self.field_body_translator_class) )
else: else:
value = body_elem.astext() value = body_elem.astext()
elif element.tagname == 'authors': # author list elif element.tagname == "authors": # author list
name = element.tagname name = element.tagname
value = [element.astext() for element in element.children] value = [element.astext() for element in element.children]
else: # standard fields (e.g. address) else: # standard fields (e.g. address)
@@ -252,22 +247,24 @@ class RstReader(BaseReader):
return output return output
def _get_publisher(self, source_path): def _get_publisher(self, source_path):
extra_params = {'initial_header_level': '2', extra_params = {
'syntax_highlight': 'short', "initial_header_level": "2",
'input_encoding': 'utf-8', "syntax_highlight": "short",
'language_code': self._language_code, "input_encoding": "utf-8",
'halt_level': 2, "language_code": self._language_code,
'traceback': True, "halt_level": 2,
'warning_stream': StringIO(), "traceback": True,
'embed_stylesheet': False} "warning_stream": StringIO(),
user_params = self.settings.get('DOCUTILS_SETTINGS') "embed_stylesheet": False,
}
user_params = self.settings.get("DOCUTILS_SETTINGS")
if user_params: if user_params:
extra_params.update(user_params) extra_params.update(user_params)
pub = docutils.core.Publisher( pub = docutils.core.Publisher(
writer=self.writer_class(), writer=self.writer_class(), destination_class=docutils.io.StringOutput
destination_class=docutils.io.StringOutput) )
pub.set_components('standalone', 'restructuredtext', 'html') pub.set_components("standalone", "restructuredtext", "html")
pub.process_programmatic_settings(None, extra_params, None) pub.process_programmatic_settings(None, extra_params, None)
pub.set_source(source_path=source_path) pub.set_source(source_path=source_path)
pub.publish() pub.publish()
@@ -277,10 +274,10 @@ class RstReader(BaseReader):
"""Parses restructured text""" """Parses restructured text"""
pub = self._get_publisher(source_path) pub = self._get_publisher(source_path)
parts = pub.writer.parts parts = pub.writer.parts
content = parts.get('body') content = parts.get("body")
metadata = self._parse_metadata(pub.document, source_path) metadata = self._parse_metadata(pub.document, source_path)
metadata.setdefault('title', parts.get('title')) metadata.setdefault("title", parts.get("title"))
return content, metadata return content, metadata
@@ -289,26 +286,26 @@ class MarkdownReader(BaseReader):
"""Reader for Markdown files""" """Reader for Markdown files"""
enabled = bool(Markdown) enabled = bool(Markdown)
file_extensions = ['md', 'markdown', 'mkd', 'mdown'] file_extensions = ["md", "markdown", "mkd", "mdown"]
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs) super().__init__(*args, **kwargs)
settings = self.settings['MARKDOWN'] settings = self.settings["MARKDOWN"]
settings.setdefault('extension_configs', {}) settings.setdefault("extension_configs", {})
settings.setdefault('extensions', []) settings.setdefault("extensions", [])
for extension in settings['extension_configs'].keys(): for extension in settings["extension_configs"].keys():
if extension not in settings['extensions']: if extension not in settings["extensions"]:
settings['extensions'].append(extension) settings["extensions"].append(extension)
if 'markdown.extensions.meta' not in settings['extensions']: if "markdown.extensions.meta" not in settings["extensions"]:
settings['extensions'].append('markdown.extensions.meta') settings["extensions"].append("markdown.extensions.meta")
self._source_path = None self._source_path = None
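
The normalization above keeps the MARKDOWN setting consistent: any extension that appears only as an extension_configs key is promoted into extensions, and the meta extension is always appended so metadata parsing works. The effect, replayed in isolation with a hypothetical config:

    markdown_settings = {
        "extension_configs": {
            "markdown.extensions.codehilite": {"css_class": "highlight"},
        },
        "extensions": [],
    }
    for extension in markdown_settings["extension_configs"]:
        if extension not in markdown_settings["extensions"]:
            markdown_settings["extensions"].append(extension)
    if "markdown.extensions.meta" not in markdown_settings["extensions"]:
        markdown_settings["extensions"].append("markdown.extensions.meta")
    print(markdown_settings["extensions"])
    # ['markdown.extensions.codehilite', 'markdown.extensions.meta']
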
def _parse_metadata(self, meta): def _parse_metadata(self, meta):
"""Return the dict containing document metadata""" """Return the dict containing document metadata"""
formatted_fields = self.settings['FORMATTED_FIELDS'] formatted_fields = self.settings["FORMATTED_FIELDS"]
# prevent metadata extraction in fields # prevent metadata extraction in fields
self._md.preprocessors.deregister('meta') self._md.preprocessors.deregister("meta")
output = {} output = {}
for name, value in meta.items(): for name, value in meta.items():
@@ -323,9 +320,10 @@ class MarkdownReader(BaseReader):
elif not DUPLICATES_DEFINITIONS_ALLOWED.get(name, True): elif not DUPLICATES_DEFINITIONS_ALLOWED.get(name, True):
if len(value) > 1: if len(value) > 1:
logger.warning( logger.warning(
'Duplicate definition of `%s` ' "Duplicate definition of `%s` " "for %s. Using first one.",
'for %s. Using first one.', name,
name, self._source_path) self._source_path,
)
output[name] = self.process_metadata(name, value[0]) output[name] = self.process_metadata(name, value[0])
elif len(value) > 1: elif len(value) > 1:
# handle list metadata as list of string # handle list metadata as list of string
@@ -339,11 +337,11 @@ class MarkdownReader(BaseReader):
"""Parse content and metadata of markdown files""" """Parse content and metadata of markdown files"""
self._source_path = source_path self._source_path = source_path
self._md = Markdown(**self.settings['MARKDOWN']) self._md = Markdown(**self.settings["MARKDOWN"])
with pelican_open(source_path) as text: with pelican_open(source_path) as text:
content = self._md.convert(text) content = self._md.convert(text)
if hasattr(self._md, 'Meta'): if hasattr(self._md, "Meta"):
metadata = self._parse_metadata(self._md.Meta) metadata = self._parse_metadata(self._md.Meta)
else: else:
metadata = {} metadata = {}
@@ -353,17 +351,17 @@ class HTMLReader(BaseReader):
class HTMLReader(BaseReader): class HTMLReader(BaseReader):
"""Parses HTML files as input, looking for meta, title, and body tags""" """Parses HTML files as input, looking for meta, title, and body tags"""
file_extensions = ['htm', 'html'] file_extensions = ["htm", "html"]
enabled = True enabled = True
class _HTMLParser(HTMLParser): class _HTMLParser(HTMLParser):
def __init__(self, settings, filename): def __init__(self, settings, filename):
super().__init__(convert_charrefs=False) super().__init__(convert_charrefs=False)
self.body = '' self.body = ""
self.metadata = {} self.metadata = {}
self.settings = settings self.settings = settings
self._data_buffer = '' self._data_buffer = ""
self._filename = filename self._filename = filename
@@ -374,59 +372,59 @@ class HTMLReader(BaseReader):
self._in_tags = False self._in_tags = False
def handle_starttag(self, tag, attrs): def handle_starttag(self, tag, attrs):
if tag == 'head' and self._in_top_level: if tag == "head" and self._in_top_level:
self._in_top_level = False self._in_top_level = False
self._in_head = True self._in_head = True
elif tag == 'title' and self._in_head: elif tag == "title" and self._in_head:
self._in_title = True self._in_title = True
self._data_buffer = '' self._data_buffer = ""
elif tag == 'body' and self._in_top_level: elif tag == "body" and self._in_top_level:
self._in_top_level = False self._in_top_level = False
self._in_body = True self._in_body = True
self._data_buffer = '' self._data_buffer = ""
elif tag == 'meta' and self._in_head: elif tag == "meta" and self._in_head:
self._handle_meta_tag(attrs) self._handle_meta_tag(attrs)
elif self._in_body: elif self._in_body:
self._data_buffer += self.build_tag(tag, attrs, False) self._data_buffer += self.build_tag(tag, attrs, False)
def handle_endtag(self, tag): def handle_endtag(self, tag):
if tag == 'head': if tag == "head":
if self._in_head: if self._in_head:
self._in_head = False self._in_head = False
self._in_top_level = True self._in_top_level = True
elif self._in_head and tag == 'title': elif self._in_head and tag == "title":
self._in_title = False self._in_title = False
self.metadata['title'] = self._data_buffer self.metadata["title"] = self._data_buffer
elif tag == 'body': elif tag == "body":
self.body = self._data_buffer self.body = self._data_buffer
self._in_body = False self._in_body = False
self._in_top_level = True self._in_top_level = True
elif self._in_body: elif self._in_body:
self._data_buffer += '</{}>'.format(escape(tag)) self._data_buffer += "</{}>".format(escape(tag))
def handle_startendtag(self, tag, attrs): def handle_startendtag(self, tag, attrs):
if tag == 'meta' and self._in_head: if tag == "meta" and self._in_head:
self._handle_meta_tag(attrs) self._handle_meta_tag(attrs)
if self._in_body: if self._in_body:
self._data_buffer += self.build_tag(tag, attrs, True) self._data_buffer += self.build_tag(tag, attrs, True)
def handle_comment(self, data): def handle_comment(self, data):
self._data_buffer += '<!--{}-->'.format(data) self._data_buffer += "<!--{}-->".format(data)
def handle_data(self, data): def handle_data(self, data):
self._data_buffer += data self._data_buffer += data
def handle_entityref(self, data): def handle_entityref(self, data):
self._data_buffer += '&{};'.format(data) self._data_buffer += "&{};".format(data)
def handle_charref(self, data): def handle_charref(self, data):
self._data_buffer += '&#{};'.format(data) self._data_buffer += "&#{};".format(data)
def build_tag(self, tag, attrs, close_tag): def build_tag(self, tag, attrs, close_tag):
result = '<{}'.format(escape(tag)) result = "<{}".format(escape(tag))
for k, v in attrs: for k, v in attrs:
result += ' ' + escape(k) result += " " + escape(k)
if v is not None: if v is not None:
# If the attribute value contains a double quote, surround # If the attribute value contains a double quote, surround
# with single quotes, otherwise use double quotes. # with single quotes, otherwise use double quotes.
@@ -435,33 +433,39 @@ class HTMLReader(BaseReader):
else: else:
result += '="{}"'.format(escape(v, quote=False)) result += '="{}"'.format(escape(v, quote=False))
if close_tag: if close_tag:
return result + ' />' return result + " />"
return result + '>' return result + ">"
def _handle_meta_tag(self, attrs): def _handle_meta_tag(self, attrs):
name = self._attr_value(attrs, 'name') name = self._attr_value(attrs, "name")
if name is None: if name is None:
attr_list = ['{}="{}"'.format(k, v) for k, v in attrs] attr_list = ['{}="{}"'.format(k, v) for k, v in attrs]
attr_serialized = ', '.join(attr_list) attr_serialized = ", ".join(attr_list)
logger.warning("Meta tag in file %s does not have a 'name' " logger.warning(
"attribute, skipping. Attributes: %s", "Meta tag in file %s does not have a 'name' "
self._filename, attr_serialized) "attribute, skipping. Attributes: %s",
self._filename,
attr_serialized,
)
return return
name = name.lower() name = name.lower()
contents = self._attr_value(attrs, 'content', '') contents = self._attr_value(attrs, "content", "")
if not contents: if not contents:
contents = self._attr_value(attrs, 'contents', '') contents = self._attr_value(attrs, "contents", "")
if contents: if contents:
logger.warning( logger.warning(
"Meta tag attribute 'contents' used in file %s, should" "Meta tag attribute 'contents' used in file %s, should"
" be changed to 'content'", " be changed to 'content'",
self._filename, self._filename,
extra={'limit_msg': "Other files have meta tag " extra={
"attribute 'contents' that should " "limit_msg": "Other files have meta tag "
"be changed to 'content'"}) "attribute 'contents' that should "
"be changed to 'content'"
},
)
if name == 'keywords': if name == "keywords":
name = 'tags' name = "tags"
if name in self.metadata: if name in self.metadata:
# if this metadata already exists (i.e. a previous tag with the # if this metadata already exists (i.e. a previous tag with the
@@ -501,22 +505,23 @@ class Readers(FileStampDataCacher):
""" """
def __init__(self, settings=None, cache_name=''): def __init__(self, settings=None, cache_name=""):
self.settings = settings or {} self.settings = settings or {}
self.readers = {} self.readers = {}
self.reader_classes = {} self.reader_classes = {}
for cls in [BaseReader] + BaseReader.__subclasses__(): for cls in [BaseReader] + BaseReader.__subclasses__():
if not cls.enabled: if not cls.enabled:
logger.debug('Missing dependencies for %s', logger.debug(
', '.join(cls.file_extensions)) "Missing dependencies for %s", ", ".join(cls.file_extensions)
)
continue continue
for ext in cls.file_extensions: for ext in cls.file_extensions:
self.reader_classes[ext] = cls self.reader_classes[ext] = cls
if self.settings['READERS']: if self.settings["READERS"]:
self.reader_classes.update(self.settings['READERS']) self.reader_classes.update(self.settings["READERS"])
signals.readers_init.send(self) signals.readers_init.send(self)
@@ -527,53 +532,67 @@ class Readers(FileStampDataCacher):
self.readers[fmt] = reader_class(self.settings) self.readers[fmt] = reader_class(self.settings)
# set up caching # set up caching
cache_this_level = (cache_name != '' and cache_this_level = (
self.settings['CONTENT_CACHING_LAYER'] == 'reader') cache_name != "" and self.settings["CONTENT_CACHING_LAYER"] == "reader"
caching_policy = cache_this_level and self.settings['CACHE_CONTENT'] )
load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE'] caching_policy = cache_this_level and self.settings["CACHE_CONTENT"]
load_policy = cache_this_level and self.settings["LOAD_CONTENT_CACHE"]
super().__init__(settings, cache_name, caching_policy, load_policy) super().__init__(settings, cache_name, caching_policy, load_policy)
@property @property
def extensions(self): def extensions(self):
return self.readers.keys() return self.readers.keys()
def read_file(self, base_path, path, content_class=Page, fmt=None, def read_file(
context=None, preread_signal=None, preread_sender=None, self,
context_signal=None, context_sender=None): base_path,
path,
content_class=Page,
fmt=None,
context=None,
preread_signal=None,
preread_sender=None,
context_signal=None,
context_sender=None,
):
"""Return a content object parsed with the given format.""" """Return a content object parsed with the given format."""
path = os.path.abspath(os.path.join(base_path, path)) path = os.path.abspath(os.path.join(base_path, path))
source_path = posixize_path(os.path.relpath(path, base_path)) source_path = posixize_path(os.path.relpath(path, base_path))
logger.debug( logger.debug("Read file %s -> %s", source_path, content_class.__name__)
'Read file %s -> %s',
source_path, content_class.__name__)
if not fmt: if not fmt:
_, ext = os.path.splitext(os.path.basename(path)) _, ext = os.path.splitext(os.path.basename(path))
fmt = ext[1:] fmt = ext[1:]
if fmt not in self.readers: if fmt not in self.readers:
raise TypeError( raise TypeError("Pelican does not know how to parse %s", path)
'Pelican does not know how to parse %s', path)
if preread_signal: if preread_signal:
logger.debug( logger.debug("Signal %s.send(%s)", preread_signal.name, preread_sender)
'Signal %s.send(%s)',
preread_signal.name, preread_sender)
preread_signal.send(preread_sender) preread_signal.send(preread_sender)
reader = self.readers[fmt] reader = self.readers[fmt]
metadata = _filter_discardable_metadata(default_metadata( metadata = _filter_discardable_metadata(
settings=self.settings, process=reader.process_metadata)) default_metadata(settings=self.settings, process=reader.process_metadata)
metadata.update(path_metadata( )
full_path=path, source_path=source_path, metadata.update(
settings=self.settings)) path_metadata(
metadata.update(_filter_discardable_metadata(parse_path_metadata( full_path=path, source_path=source_path, settings=self.settings
source_path=source_path, settings=self.settings, )
process=reader.process_metadata))) )
metadata.update(
_filter_discardable_metadata(
parse_path_metadata(
source_path=source_path,
settings=self.settings,
process=reader.process_metadata,
)
)
)
reader_name = reader.__class__.__name__ reader_name = reader.__class__.__name__
metadata['reader'] = reader_name.replace('Reader', '').lower() metadata["reader"] = reader_name.replace("Reader", "").lower()
content, reader_metadata = self.get_cached_data(path, (None, None)) content, reader_metadata = self.get_cached_data(path, (None, None))
if content is None: if content is None:
@@ -587,14 +606,14 @@ class Readers(FileStampDataCacher):
find_empty_alt(content, path) find_empty_alt(content, path)
# eventually filter the content with typogrify if asked so # eventually filter the content with typogrify if asked so
if self.settings['TYPOGRIFY']: if self.settings["TYPOGRIFY"]:
from typogrify.filters import typogrify from typogrify.filters import typogrify
import smartypants import smartypants
typogrify_dashes = self.settings['TYPOGRIFY_DASHES'] typogrify_dashes = self.settings["TYPOGRIFY_DASHES"]
if typogrify_dashes == 'oldschool': if typogrify_dashes == "oldschool":
smartypants.Attr.default = smartypants.Attr.set2 smartypants.Attr.default = smartypants.Attr.set2
elif typogrify_dashes == 'oldschool_inverted': elif typogrify_dashes == "oldschool_inverted":
smartypants.Attr.default = smartypants.Attr.set3 smartypants.Attr.default = smartypants.Attr.set3
else: else:
smartypants.Attr.default = smartypants.Attr.set1 smartypants.Attr.default = smartypants.Attr.set1
@@ -608,31 +627,32 @@ class Readers(FileStampDataCacher):
def typogrify_wrapper(text): def typogrify_wrapper(text):
"""Ensures ignore_tags feature is backward compatible""" """Ensures ignore_tags feature is backward compatible"""
try: try:
return typogrify( return typogrify(text, self.settings["TYPOGRIFY_IGNORE_TAGS"])
text,
self.settings['TYPOGRIFY_IGNORE_TAGS'])
except TypeError: except TypeError:
return typogrify(text) return typogrify(text)
if content: if content:
content = typogrify_wrapper(content) content = typogrify_wrapper(content)
if 'title' in metadata: if "title" in metadata:
metadata['title'] = typogrify_wrapper(metadata['title']) metadata["title"] = typogrify_wrapper(metadata["title"])
if 'summary' in metadata: if "summary" in metadata:
metadata['summary'] = typogrify_wrapper(metadata['summary']) metadata["summary"] = typogrify_wrapper(metadata["summary"])
if context_signal: if context_signal:
logger.debug( logger.debug(
'Signal %s.send(%s, <metadata>)', "Signal %s.send(%s, <metadata>)", context_signal.name, context_sender
context_signal.name, )
context_sender)
context_signal.send(context_sender, metadata=metadata) context_signal.send(context_sender, metadata=metadata)
return content_class(content=content, metadata=metadata, return content_class(
settings=self.settings, source_path=path, content=content,
context=context) metadata=metadata,
settings=self.settings,
source_path=path,
context=context,
)
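
Taken together, the visible part of read_file layers metadata with plain dict.update calls, so later sources win: reader defaults, then filesystem and EXTRA_PATH_METADATA values, then metadata parsed from the path pattern, and further down whatever the reader extracted from the document itself. Schematically, with hypothetical values:

    # later layers override earlier ones, as in read_file above
    defaults = {"category": "misc", "date": "2023-01-01"}
    from_path = {"date": "2023-10-29"}
    from_document = {"title": "Hello", "date": "2023-10-30"}

    metadata = {}
    for layer in (defaults, from_path, from_document):
        metadata.update(layer)
    print(metadata)  # {'category': 'misc', 'date': '2023-10-30', 'title': 'Hello'}
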
def find_empty_alt(content, path): def find_empty_alt(content, path):
@@ -642,7 +662,8 @@ def find_empty_alt(content, path):
as they are really likely to be accessibility flaws. as they are really likely to be accessibility flaws.
""" """
imgs = re.compile(r""" imgs = re.compile(
r"""
(?: (?:
# src before alt # src before alt
<img <img
@@ -658,53 +679,57 @@ def find_empty_alt(content, path):
[^\>]* [^\>]*
src=(['"])(.*?)\5 src=(['"])(.*?)\5
) )
""", re.X) """,
re.X,
)
for match in re.findall(imgs, content): for match in re.findall(imgs, content):
logger.warning( logger.warning(
'Empty alt attribute for image %s in %s', "Empty alt attribute for image %s in %s",
os.path.basename(match[1] + match[5]), path, os.path.basename(match[1] + match[5]),
extra={'limit_msg': 'Other images have empty alt attributes'}) path,
extra={"limit_msg": "Other images have empty alt attributes"},
)
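
The verbose regex above matches img tags whose alt is empty whether alt comes before or after src, and the warning then names the offending image. A simplified single-order variant shows the idea:

    import re

    # simplified variant of the pattern above: src first, then an empty alt
    imgs = re.compile(r"""<img [^>]* src=(['"])(.*?)\1 [^>]* alt=(['"])\3""", re.X)

    html = '<img src="logo.png" alt=""> <img src="ok.png" alt="a logo">'
    for m in imgs.finditer(html):
        print("empty alt for", m.group(2))  # empty alt for logo.png
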
def default_metadata(settings=None, process=None): def default_metadata(settings=None, process=None):
metadata = {} metadata = {}
if settings: if settings:
for name, value in dict(settings.get('DEFAULT_METADATA', {})).items(): for name, value in dict(settings.get("DEFAULT_METADATA", {})).items():
if process: if process:
value = process(name, value) value = process(name, value)
metadata[name] = value metadata[name] = value
if 'DEFAULT_CATEGORY' in settings: if "DEFAULT_CATEGORY" in settings:
value = settings['DEFAULT_CATEGORY'] value = settings["DEFAULT_CATEGORY"]
if process: if process:
value = process('category', value) value = process("category", value)
metadata['category'] = value metadata["category"] = value
if settings.get('DEFAULT_DATE', None) and \ if settings.get("DEFAULT_DATE", None) and settings["DEFAULT_DATE"] != "fs":
settings['DEFAULT_DATE'] != 'fs': if isinstance(settings["DEFAULT_DATE"], str):
if isinstance(settings['DEFAULT_DATE'], str): metadata["date"] = get_date(settings["DEFAULT_DATE"])
metadata['date'] = get_date(settings['DEFAULT_DATE'])
else: else:
metadata['date'] = datetime.datetime(*settings['DEFAULT_DATE']) metadata["date"] = datetime.datetime(*settings["DEFAULT_DATE"])
return metadata return metadata
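
DEFAULT_DATE accepts three spellings, all handled above: the string "fs" (deferred to path_metadata() below, which stamps the file's mtime), a date string routed through get_date(), or a tuple splatted into datetime.datetime. The tuple case, for instance:

    import datetime

    DEFAULT_DATE = (2023, 10, 29)            # as a tuple of datetime args
    print(datetime.datetime(*DEFAULT_DATE))  # 2023-10-29 00:00:00
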
def path_metadata(full_path, source_path, settings=None): def path_metadata(full_path, source_path, settings=None):
metadata = {} metadata = {}
if settings: if settings:
if settings.get('DEFAULT_DATE', None) == 'fs': if settings.get("DEFAULT_DATE", None) == "fs":
metadata['date'] = datetime.datetime.fromtimestamp( metadata["date"] = datetime.datetime.fromtimestamp(
os.stat(full_path).st_mtime) os.stat(full_path).st_mtime
metadata['modified'] = metadata['date'] )
metadata["modified"] = metadata["date"]
# Apply EXTRA_PATH_METADATA for the source path and the paths of any # Apply EXTRA_PATH_METADATA for the source path and the paths of any
# parent directories. Sorting EPM first ensures that the most specific # parent directories. Sorting EPM first ensures that the most specific
# path wins conflicts. # path wins conflicts.
epm = settings.get('EXTRA_PATH_METADATA', {}) epm = settings.get("EXTRA_PATH_METADATA", {})
for path, meta in sorted(epm.items()): for path, meta in sorted(epm.items()):
# Enforce a trailing slash when checking for parent directories. # Enforce a trailing slash when checking for parent directories.
# This prevents false positives when one file or directory's name # This prevents false positives when one file or directory's name
# is a prefix of another's. # is a prefix of another's.
dirpath = posixize_path(os.path.join(path, '')) dirpath = posixize_path(os.path.join(path, ""))
if source_path == path or source_path.startswith(dirpath): if source_path == path or source_path.startswith(dirpath):
metadata.update(meta) metadata.update(meta)
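
The trailing-slash trick in this hunk is subtle enough to deserve a check: joining with an empty component appends the separator, so a directory name that is merely a prefix of another no longer matches. For example:

    import posixpath

    def applies_to(source_path, epm_path):
        # mirror of the check above, with posixpath standing in for
        # posixize_path(os.path.join(...)) to keep the sketch portable
        dirpath = posixpath.join(epm_path, "")  # 'static/css' -> 'static/css/'
        return source_path == epm_path or source_path.startswith(dirpath)

    print(applies_to("static/css-old/site.css", "static/css"))  # False
    print(applies_to("static/css/site.css", "static/css"))      # True
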
@@ -736,11 +761,10 @@ def parse_path_metadata(source_path, settings=None, process=None):
subdir = os.path.basename(dirname) subdir = os.path.basename(dirname)
if settings: if settings:
checks = [] checks = []
for key, data in [('FILENAME_METADATA', base), for key, data in [("FILENAME_METADATA", base), ("PATH_METADATA", source_path)]:
('PATH_METADATA', source_path)]:
checks.append((settings.get(key, None), data)) checks.append((settings.get(key, None), data))
if settings.get('USE_FOLDER_AS_CATEGORY', None): if settings.get("USE_FOLDER_AS_CATEGORY", None):
checks.append(('(?P<category>.*)', subdir)) checks.append(("(?P<category>.*)", subdir))
for regexp, data in checks: for regexp, data in checks:
if regexp and data: if regexp and data:
match = re.match(regexp, data) match = re.match(regexp, data)
View file
@@ -11,26 +11,26 @@ import pelican.settings as pys
class Pygments(Directive): class Pygments(Directive):
""" Source code syntax highlighting. """Source code syntax highlighting."""
"""
required_arguments = 1 required_arguments = 1
optional_arguments = 0 optional_arguments = 0
final_argument_whitespace = True final_argument_whitespace = True
option_spec = { option_spec = {
'anchorlinenos': directives.flag, "anchorlinenos": directives.flag,
'classprefix': directives.unchanged, "classprefix": directives.unchanged,
'hl_lines': directives.unchanged, "hl_lines": directives.unchanged,
'lineanchors': directives.unchanged, "lineanchors": directives.unchanged,
'linenos': directives.unchanged, "linenos": directives.unchanged,
'linenospecial': directives.nonnegative_int, "linenospecial": directives.nonnegative_int,
'linenostart': directives.nonnegative_int, "linenostart": directives.nonnegative_int,
'linenostep': directives.nonnegative_int, "linenostep": directives.nonnegative_int,
'lineseparator': directives.unchanged, "lineseparator": directives.unchanged,
'linespans': directives.unchanged, "linespans": directives.unchanged,
'nobackground': directives.flag, "nobackground": directives.flag,
'nowrap': directives.flag, "nowrap": directives.flag,
'tagsfile': directives.unchanged, "tagsfile": directives.unchanged,
'tagurlformat': directives.unchanged, "tagurlformat": directives.unchanged,
} }
has_content = True has_content = True
@@ -49,28 +49,30 @@ class Pygments(Directive):
if k not in self.options: if k not in self.options:
self.options[k] = v self.options[k] = v
if ('linenos' in self.options and if "linenos" in self.options and self.options["linenos"] not in (
self.options['linenos'] not in ('table', 'inline')): "table",
if self.options['linenos'] == 'none': "inline",
self.options.pop('linenos') ):
if self.options["linenos"] == "none":
self.options.pop("linenos")
else: else:
self.options['linenos'] = 'table' self.options["linenos"] = "table"
for flag in ('nowrap', 'nobackground', 'anchorlinenos'): for flag in ("nowrap", "nobackground", "anchorlinenos"):
if flag in self.options: if flag in self.options:
self.options[flag] = True self.options[flag] = True
# noclasses should already default to False, but just in case... # noclasses should already default to False, but just in case...
formatter = HtmlFormatter(noclasses=False, **self.options) formatter = HtmlFormatter(noclasses=False, **self.options)
parsed = highlight('\n'.join(self.content), lexer, formatter) parsed = highlight("\n".join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')] return [nodes.raw("", parsed, format="html")]
directives.register_directive('code-block', Pygments) directives.register_directive("code-block", Pygments)
directives.register_directive('sourcecode', Pygments) directives.register_directive("sourcecode", Pygments)
_abbr_re = re.compile(r'\((.*)\)$', re.DOTALL) _abbr_re = re.compile(r"\((.*)\)$", re.DOTALL)
class abbreviation(nodes.Inline, nodes.TextElement): class abbreviation(nodes.Inline, nodes.TextElement):
@@ -82,9 +84,9 @@ def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
m = _abbr_re.search(text) m = _abbr_re.search(text)
if m is None: if m is None:
return [abbreviation(text, text)], [] return [abbreviation(text, text)], []
abbr = text[:m.start()].strip() abbr = text[: m.start()].strip()
expl = m.group(1) expl = m.group(1)
return [abbreviation(abbr, abbr, explanation=expl)], [] return [abbreviation(abbr, abbr, explanation=expl)], []
roles.register_local_role('abbr', abbr_role) roles.register_local_role("abbr", abbr_role)
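
The abbr role splits its text at a trailing parenthetical: everything before it becomes the abbreviation, the parenthesized part its explanation. The regex in action:

    import re

    _abbr_re = re.compile(r"\((.*)\)$", re.DOTALL)

    text = "HTML (HyperText Markup Language)"
    m = _abbr_re.search(text)
    abbr = text[: m.start()].strip()
    expl = m.group(1)
    print(abbr, "->", expl)  # HTML -> HyperText Markup Language
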
View file
@@ -14,38 +14,47 @@ except ImportError:
from pelican.log import console # noqa: F401 from pelican.log import console # noqa: F401
from pelican.log import init as init_logging from pelican.log import init as init_logging
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def parse_arguments(): def parse_arguments():
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description='Pelican Development Server', description="Pelican Development Server",
formatter_class=argparse.ArgumentDefaultsHelpFormatter formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"port", default=8000, type=int, nargs="?", help="Port to Listen On"
)
parser.add_argument("server", default="", nargs="?", help="Interface to Listen On")
parser.add_argument("--ssl", action="store_true", help="Activate SSL listener")
parser.add_argument(
"--cert",
default="./cert.pem",
nargs="?",
help="Path to certificate file. " + "Relative to current directory",
)
parser.add_argument(
"--key",
default="./key.pem",
nargs="?",
help="Path to certificate key file. " + "Relative to current directory",
)
parser.add_argument(
"--path",
default=".",
help="Path to pelican source directory to serve. "
+ "Relative to current directory",
) )
parser.add_argument("port", default=8000, type=int, nargs="?",
help="Port to Listen On")
parser.add_argument("server", default="", nargs="?",
help="Interface to Listen On")
parser.add_argument('--ssl', action="store_true",
help='Activate SSL listener')
parser.add_argument('--cert', default="./cert.pem", nargs="?",
help='Path to certificate file. ' +
'Relative to current directory')
parser.add_argument('--key', default="./key.pem", nargs="?",
help='Path to certificate key file. ' +
'Relative to current directory')
parser.add_argument('--path', default=".",
help='Path to pelican source directory to serve. ' +
'Relative to current directory')
return parser.parse_args() return parser.parse_args()
class ComplexHTTPRequestHandler(server.SimpleHTTPRequestHandler): class ComplexHTTPRequestHandler(server.SimpleHTTPRequestHandler):
SUFFIXES = ['.html', '/index.html', '/', ''] SUFFIXES = [".html", "/index.html", "/", ""]
extensions_map = { extensions_map = {
**server.SimpleHTTPRequestHandler.extensions_map, **server.SimpleHTTPRequestHandler.extensions_map,
** { **{
# web fonts # web fonts
".oft": "font/oft", ".oft": "font/oft",
".sfnt": "font/sfnt", ".sfnt": "font/sfnt",
@@ -57,13 +66,13 @@ class ComplexHTTPRequestHandler(server.SimpleHTTPRequestHandler):
def translate_path(self, path): def translate_path(self, path):
# abandon query parameters # abandon query parameters
path = path.split('?', 1)[0] path = path.split("?", 1)[0]
path = path.split('#', 1)[0] path = path.split("#", 1)[0]
# Don't forget explicit trailing slash when normalizing. Issue17324 # Don't forget explicit trailing slash when normalizing. Issue17324
trailing_slash = path.rstrip().endswith('/') trailing_slash = path.rstrip().endswith("/")
path = urllib.parse.unquote(path) path = urllib.parse.unquote(path)
path = posixpath.normpath(path) path = posixpath.normpath(path)
words = path.split('/') words = path.split("/")
words = filter(None, words) words = filter(None, words)
path = self.base_path path = self.base_path
for word in words: for word in words:
@@ -72,12 +81,12 @@ class ComplexHTTPRequestHandler(server.SimpleHTTPRequestHandler):
continue continue
path = os.path.join(path, word) path = os.path.join(path, word)
if trailing_slash: if trailing_slash:
path += '/' path += "/"
return path return path
def do_GET(self): def do_GET(self):
# cut off a query string # cut off a query string
original_path = self.path.split('?', 1)[0] original_path = self.path.split("?", 1)[0]
# try to find file # try to find file
self.path = self.get_path_that_exists(original_path) self.path = self.get_path_that_exists(original_path)
@@ -88,12 +97,12 @@ class ComplexHTTPRequestHandler(server.SimpleHTTPRequestHandler):
def get_path_that_exists(self, original_path): def get_path_that_exists(self, original_path):
# Try to strip trailing slash # Try to strip trailing slash
trailing_slash = original_path.endswith('/') trailing_slash = original_path.endswith("/")
original_path = original_path.rstrip('/') original_path = original_path.rstrip("/")
# Try to detect file by applying various suffixes # Try to detect file by applying various suffixes
tries = [] tries = []
for suffix in self.SUFFIXES: for suffix in self.SUFFIXES:
if not trailing_slash and suffix == '/': if not trailing_slash and suffix == "/":
# if original request does not have trailing slash, skip the '/' suffix # if original request does not have trailing slash, skip the '/' suffix
# so that base class can redirect if needed # so that base class can redirect if needed
continue continue
@@ -101,18 +110,17 @@ class ComplexHTTPRequestHandler(server.SimpleHTTPRequestHandler):
if os.path.exists(self.translate_path(path)): if os.path.exists(self.translate_path(path)):
return path return path
tries.append(path) tries.append(path)
logger.warning("Unable to find `%s` or variations:\n%s", logger.warning(
original_path, "Unable to find `%s` or variations:\n%s", original_path, "\n".join(tries)
'\n'.join(tries)) )
return None return None
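
get_path_that_exists() probes the SUFFIXES list in order, so /about resolves to /about.html before /about/index.html, and a bare path still falls through to "/" and "". A toy resolver over a fake filesystem makes the order visible (it omits the trailing-slash redirect nuance handled above):

    SUFFIXES = [".html", "/index.html", "/", ""]
    existing = {"/srv/site/about.html", "/srv/site/blog/index.html"}

    def resolve(original_path):
        path = original_path.rstrip("/")
        for suffix in SUFFIXES:
            candidate = "/srv/site" + path + suffix
            if candidate in existing:
                return path + suffix
        return None

    print(resolve("/about"))    # /about.html
    print(resolve("/blog/"))    # /blog/index.html
    print(resolve("/missing"))  # None
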
def guess_type(self, path): def guess_type(self, path):
"""Guess at the mime type for the specified file. """Guess at the mime type for the specified file."""
"""
mimetype = server.SimpleHTTPRequestHandler.guess_type(self, path) mimetype = server.SimpleHTTPRequestHandler.guess_type(self, path)
# If the default guess is too generic, try the python-magic library # If the default guess is too generic, try the python-magic library
if mimetype == 'application/octet-stream' and magic_from_file: if mimetype == "application/octet-stream" and magic_from_file:
mimetype = magic_from_file(path, mime=True) mimetype = magic_from_file(path, mime=True)
return mimetype return mimetype
@ -127,31 +135,33 @@ class RootedHTTPServer(server.HTTPServer):
self.RequestHandlerClass.base_path = base_path self.RequestHandlerClass.base_path = base_path
if __name__ == '__main__': if __name__ == "__main__":
init_logging(level=logging.INFO) init_logging(level=logging.INFO)
logger.warning("'python -m pelican.server' is deprecated.\nThe " logger.warning(
"Pelican development server should be run via " "'python -m pelican.server' is deprecated.\nThe "
"'pelican --listen' or 'pelican -l'.\nThis can be combined " "Pelican development server should be run via "
"with regeneration as 'pelican -lr'.\nRerun 'pelican-" "'pelican --listen' or 'pelican -l'.\nThis can be combined "
"quickstart' to get new Makefile and tasks.py files.") "with regeneration as 'pelican -lr'.\nRerun 'pelican-"
"quickstart' to get new Makefile and tasks.py files."
)
args = parse_arguments() args = parse_arguments()
RootedHTTPServer.allow_reuse_address = True RootedHTTPServer.allow_reuse_address = True
try: try:
httpd = RootedHTTPServer( httpd = RootedHTTPServer(
args.path, (args.server, args.port), ComplexHTTPRequestHandler) args.path, (args.server, args.port), ComplexHTTPRequestHandler
)
if args.ssl: if args.ssl:
httpd.socket = ssl.wrap_socket( httpd.socket = ssl.wrap_socket(
httpd.socket, keyfile=args.key, httpd.socket, keyfile=args.key, certfile=args.cert, server_side=True
certfile=args.cert, server_side=True) )
except ssl.SSLError as e: except ssl.SSLError as e:
logger.error("Couldn't open certificate file %s or key file %s", logger.error(
args.cert, args.key) "Couldn't open certificate file %s or key file %s", args.cert, args.key
logger.error("Could not listen on port %s, server %s.", )
args.port, args.server) logger.error("Could not listen on port %s, server %s.", args.port, args.server)
sys.exit(getattr(e, 'exitcode', 1)) sys.exit(getattr(e, "exitcode", 1))
logger.info("Serving at port %s, server %s.", logger.info("Serving at port %s, server %s.", args.port, args.server)
args.port, args.server)
try: try:
httpd.serve_forever() httpd.serve_forever()
except KeyboardInterrupt: except KeyboardInterrupt:
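# --- editor's note (not part of the commit) -------------------------------
# A minimal sketch of the URL-to-file resolution that get_path_that_exists()
# implements above: try each suffix in SUFFIXES until a candidate exists on
# disk. `base_dir`, `request_path`, and `resolve` are illustrative names,
# not Pelican API.
import os

SUFFIXES = [".html", "/index.html", "/", ""]


def resolve(base_dir, request_path):
    base = request_path.rstrip("/")
    for suffix in SUFFIXES:
        candidate = base + suffix
        if os.path.exists(os.path.join(base_dir, candidate.lstrip("/"))):
            return candidate  # first hit wins, e.g. /blog -> /blog.html
    return None  # nothing matched; the caller can fall back to a 404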
[File diff suppressed because it is too large]

@@ -1,4 +1,4 @@
raise ImportError(
    "Importing from `pelican.signals` is deprecated. "
    "Use `from pelican import signals` or `import pelican.plugins.signals` instead."
)
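# --- editor's note (not part of the commit) -------------------------------
# The two supported replacements named in the error message above:
from pelican import signals  # preferred form
import pelican.plugins.signals  # equivalent namespace-plugin path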
@@ -1,43 +1,47 @@
AUTHOR = "Alexis Métaireau"
SITENAME = "Alexis' log"
SITEURL = "http://blog.notmyidea.org"
TIMEZONE = "UTC"

GITHUB_URL = "http://github.com/ametaireau/"
DISQUS_SITENAME = "blog-notmyidea"
PDF_GENERATOR = False
REVERSE_CATEGORY_ORDER = True
DEFAULT_PAGINATION = 2

FEED_RSS = "feeds/all.rss.xml"
CATEGORY_FEED_RSS = "feeds/{slug}.rss.xml"

LINKS = (
    ("Biologeek", "http://biologeek.org"),
    ("Filyb", "http://filyb.info/"),
    ("Libert-fr", "http://www.libert-fr.com"),
    ("N1k0", "http://prendreuncafe.com/blog/"),
    ("Tarek Ziadé", "http://ziade.org/blog"),
    ("Zubin Mithra", "http://zubin71.wordpress.com/"),
)

SOCIAL = (
    ("twitter", "http://twitter.com/ametaireau"),
    ("lastfm", "http://lastfm.com/user/akounet"),
    ("github", "http://github.com/ametaireau"),
)

# global metadata to all the contents
DEFAULT_METADATA = {"yeah": "it is"}

# path-specific metadata
EXTRA_PATH_METADATA = {
    "extra/robots.txt": {"path": "robots.txt"},
}

# static paths will be copied without parsing their contents
STATIC_PATHS = [
    "pictures",
    "extra/robots.txt",
]

FORMATTED_FIELDS = ["summary", "custom_formatted_field"]

# foobar will not be used, because it's not in caps. All configuration keys
# have to be in caps
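# --- editor's note (not part of the commit) -------------------------------
# A sketch of how a settings module like the sample above is consumed.
# read_settings() is Pelican's public helper; "pelicanconf.py" stands in for
# whatever path your project uses.
from pelican.settings import read_settings

settings = read_settings("pelicanconf.py")
print(settings["SITENAME"])  # lower-case names such as `foobar` are ignored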
@@ -1,4 +1,4 @@
NAME = "namespace plugin"


def register():
@@ -16,7 +16,10 @@ from pelican.contents import Article
from pelican.readers import default_metadata
from pelican.settings import DEFAULT_CONFIG

__all__ = [
    "get_article",
    "unittest",
]


@contextmanager
@@ -51,7 +54,7 @@ def isplit(s, sep=None):

    True
    """
    sep, hardsep = r"\s+" if sep is None else re.escape(sep), sep is not None
    exp, pos, length = re.compile(sep), 0, len(s)
    while True:
        m = exp.search(s, pos)
@@ -89,10 +92,8 @@ def mute(returns_output=False):

    """

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            saved_stdout = sys.stdout
            sys.stdout = StringIO()
@@ -112,7 +113,7 @@ def mute(returns_output=False):

def get_article(title, content, **extra_metadata):
    metadata = default_metadata(settings=DEFAULT_CONFIG)
    metadata["title"] = title
    if extra_metadata:
        metadata.update(extra_metadata)
    return Article(content, metadata=metadata)
@@ -125,14 +126,14 @@ def skipIfNoExecutable(executable):

    and skips the tests if not found (if subprocess raises a `OSError`).
    """
    with open(os.devnull, "w") as fnull:
        try:
            res = subprocess.call(executable, stdout=fnull, stderr=fnull)
        except OSError:
            res = None

    if res is None:
        return unittest.skip("{} executable not found".format(executable))

    return lambda func: func
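# --- editor's note (not part of the commit) -------------------------------
# Typical use of the decorator above, mirroring the import tests later in
# this diff: skip a whole TestCase when pandoc is not on the PATH. The test
# class itself is hypothetical.
from pelican.tests.support import skipIfNoExecutable, unittest


@skipIfNoExecutable(["pandoc", "--version"])
class TestNeedsPandoc(unittest.TestCase):
    def test_conversion(self):
        pass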
@@ -164,10 +165,7 @@ def can_symlink():

    res = True
    try:
        with temporary_folder() as f:
            os.symlink(f, os.path.join(f, "symlink"))
    except OSError:
        res = False
    return res
@@ -186,9 +184,9 @@ def get_settings(**kwargs):

def get_context(settings=None, **kwargs):
    context = settings.copy() if settings else {}
    context["generated_content"] = {}
    context["static_links"] = set()
    context["static_content"] = {}
    context.update(kwargs)
    return context
@@ -200,22 +198,24 @@ class LogCountHandler(BufferingHandler):

        super().__init__(capacity)

    def count_logs(self, msg=None, level=None):
        return len(
            [
                rec
                for rec in self.buffer
                if (msg is None or re.match(msg, rec.getMessage()))
                and (level is None or rec.levelno == level)
            ]
        )

    def count_formatted_logs(self, msg=None, level=None):
        return len(
            [
                rec
                for rec in self.buffer
                if (msg is None or re.search(msg, self.format(rec)))
                and (level is None or rec.levelno == level)
            ]
        )
def diff_subproc(first, second):

@@ -228,8 +228,16 @@ def diff_subproc(first, second):

    >>> didCheckFail = proc.returnCode != 0
    """
    return subprocess.Popen(
        [
            "git",
            "--no-pager",
            "diff",
            "--no-ext-diff",
            "--exit-code",
            "-w",
            first,
            second,
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
@@ -251,9 +259,12 @@ class LoggedTestCase(unittest.TestCase):

    def assertLogCountEqual(self, count=None, msg=None, **kwargs):
        actual = self._logcount_handler.count_logs(msg=msg, **kwargs)
        self.assertEqual(
            actual,
            count,
            msg="expected {} occurrences of {!r}, but found {}".format(
                count, msg, actual
            ),
        )
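# --- editor's note (not part of the commit) -------------------------------
# Sketch of how LoggedTestCase is used: run code that logs, then assert on
# how often a message (matched as a regex) was recorded. The test case and
# message here are illustrative.
import logging

from pelican.tests.support import LoggedTestCase


class TestLogsOnce(LoggedTestCase):
    def test_warns_once(self):
        logging.getLogger(__name__).warning("Unable to find `/missing`")
        self.assertLogCountEqual(
            count=1, msg="Unable to find", level=logging.WARNING
        )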
class TestCaseWithCLocale(unittest.TestCase):

@@ -261,9 +272,10 @@ class TestCaseWithCLocale(unittest.TestCase):

    Use utils.temporary_locale if you want a context manager ("with" statement).
    """

    def setUp(self):
        self.old_locale = locale.setlocale(locale.LC_ALL)
        locale.setlocale(locale.LC_ALL, "C")

    def tearDown(self):
        locale.setlocale(locale.LC_ALL, self.old_locale)
@@ -8,31 +8,30 @@ from pelican.tests.support import get_context, get_settings, unittest
CUR_DIR = os.path.dirname(__file__)
CONTENT_DIR = os.path.join(CUR_DIR, "content")


class TestCache(unittest.TestCase):
    def setUp(self):
        self.temp_cache = mkdtemp(prefix="pelican_cache.")

    def tearDown(self):
        rmtree(self.temp_cache)

    def _get_cache_enabled_settings(self):
        settings = get_settings()
        settings["CACHE_CONTENT"] = True
        settings["LOAD_CONTENT_CACHE"] = True
        settings["CACHE_PATH"] = self.temp_cache
        return settings

    def test_generator_caching(self):
        """Test that cached and uncached content is same in generator level"""
        settings = self._get_cache_enabled_settings()
        settings["CONTENT_CACHING_LAYER"] = "generator"
        settings["PAGE_PATHS"] = ["TestPages"]
        settings["DEFAULT_DATE"] = (1970, 1, 1)
        settings["READERS"] = {"asc": None}
        context = get_context(settings)

        def sorted_titles(items):

@@ -40,15 +39,23 @@ class TestCache(unittest.TestCase):

        # Articles
        generator = ArticlesGenerator(
            context=context.copy(),
            settings=settings,
            path=CONTENT_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        generator.generate_context()
        uncached_articles = sorted_titles(generator.articles)
        uncached_drafts = sorted_titles(generator.drafts)

        generator = ArticlesGenerator(
            context=context.copy(),
            settings=settings,
            path=CONTENT_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        generator.generate_context()
        cached_articles = sorted_titles(generator.articles)
        cached_drafts = sorted_titles(generator.drafts)

@@ -58,16 +65,24 @@ class TestCache(unittest.TestCase):

        # Pages
        generator = PagesGenerator(
            context=context.copy(),
            settings=settings,
            path=CUR_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        generator.generate_context()
        uncached_pages = sorted_titles(generator.pages)
        uncached_hidden_pages = sorted_titles(generator.hidden_pages)
        uncached_draft_pages = sorted_titles(generator.draft_pages)

        generator = PagesGenerator(
            context=context.copy(),
            settings=settings,
            path=CUR_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        generator.generate_context()
        cached_pages = sorted_titles(generator.pages)
        cached_hidden_pages = sorted_titles(generator.hidden_pages)

@@ -80,10 +95,10 @@ class TestCache(unittest.TestCase):

    def test_reader_caching(self):
        """Test that cached and uncached content is same in reader level"""
        settings = self._get_cache_enabled_settings()
        settings["CONTENT_CACHING_LAYER"] = "reader"
        settings["PAGE_PATHS"] = ["TestPages"]
        settings["DEFAULT_DATE"] = (1970, 1, 1)
        settings["READERS"] = {"asc": None}
        context = get_context(settings)

        def sorted_titles(items):

@@ -91,15 +106,23 @@ class TestCache(unittest.TestCase):

        # Articles
        generator = ArticlesGenerator(
            context=context.copy(),
            settings=settings,
            path=CONTENT_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        generator.generate_context()
        uncached_articles = sorted_titles(generator.articles)
        uncached_drafts = sorted_titles(generator.drafts)

        generator = ArticlesGenerator(
            context=context.copy(),
            settings=settings,
            path=CONTENT_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        generator.generate_context()
        cached_articles = sorted_titles(generator.articles)
        cached_drafts = sorted_titles(generator.drafts)

@@ -109,15 +132,23 @@ class TestCache(unittest.TestCase):

        # Pages
        generator = PagesGenerator(
            context=context.copy(),
            settings=settings,
            path=CUR_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        generator.generate_context()
        uncached_pages = sorted_titles(generator.pages)
        uncached_hidden_pages = sorted_titles(generator.hidden_pages)

        generator = PagesGenerator(
            context=context.copy(),
            settings=settings,
            path=CUR_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        generator.generate_context()
        cached_pages = sorted_titles(generator.pages)
        cached_hidden_pages = sorted_titles(generator.hidden_pages)

@@ -128,20 +159,28 @@ class TestCache(unittest.TestCase):

    def test_article_object_caching(self):
        """Test Article objects caching at the generator level"""
        settings = self._get_cache_enabled_settings()
        settings["CONTENT_CACHING_LAYER"] = "generator"
        settings["DEFAULT_DATE"] = (1970, 1, 1)
        settings["READERS"] = {"asc": None}
        context = get_context(settings)

        generator = ArticlesGenerator(
            context=context.copy(),
            settings=settings,
            path=CONTENT_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        generator.generate_context()
        self.assertTrue(hasattr(generator, "_cache"))

        generator = ArticlesGenerator(
            context=context.copy(),
            settings=settings,
            path=CONTENT_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        generator.readers.read_file = MagicMock()
        generator.generate_context()
        """

@@ -158,18 +197,26 @@ class TestCache(unittest.TestCase):

    def test_article_reader_content_caching(self):
        """Test raw article content caching at the reader level"""
        settings = self._get_cache_enabled_settings()
        settings["READERS"] = {"asc": None}
        context = get_context(settings)

        generator = ArticlesGenerator(
            context=context.copy(),
            settings=settings,
            path=CONTENT_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        generator.generate_context()
        self.assertTrue(hasattr(generator.readers, "_cache"))

        generator = ArticlesGenerator(
            context=context.copy(),
            settings=settings,
            path=CONTENT_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        readers = generator.readers.readers
        for reader in readers.values():
            reader.read = MagicMock()

@@ -182,44 +229,58 @@ class TestCache(unittest.TestCase):

        used in --ignore-cache or autoreload mode"""
        settings = self._get_cache_enabled_settings()
        settings["READERS"] = {"asc": None}
        context = get_context(settings)

        generator = ArticlesGenerator(
            context=context.copy(),
            settings=settings,
            path=CONTENT_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        generator.readers.read_file = MagicMock()
        generator.generate_context()
        self.assertTrue(hasattr(generator, "_cache_open"))
        orig_call_count = generator.readers.read_file.call_count

        settings["LOAD_CONTENT_CACHE"] = False
        generator = ArticlesGenerator(
            context=context.copy(),
            settings=settings,
            path=CONTENT_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        generator.readers.read_file = MagicMock()
        generator.generate_context()
        self.assertEqual(generator.readers.read_file.call_count, orig_call_count)

    def test_page_object_caching(self):
        """Test Page objects caching at the generator level"""
        settings = self._get_cache_enabled_settings()
        settings["CONTENT_CACHING_LAYER"] = "generator"
        settings["PAGE_PATHS"] = ["TestPages"]
        settings["READERS"] = {"asc": None}
        context = get_context(settings)

        generator = PagesGenerator(
            context=context.copy(),
            settings=settings,
            path=CUR_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        generator.generate_context()
        self.assertTrue(hasattr(generator, "_cache"))

        generator = PagesGenerator(
            context=context.copy(),
            settings=settings,
            path=CUR_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        generator.readers.read_file = MagicMock()
        generator.generate_context()
        """

@@ -231,19 +292,27 @@ class TestCache(unittest.TestCase):

    def test_page_reader_content_caching(self):
        """Test raw page content caching at the reader level"""
        settings = self._get_cache_enabled_settings()
        settings["PAGE_PATHS"] = ["TestPages"]
        settings["READERS"] = {"asc": None}
        context = get_context(settings)

        generator = PagesGenerator(
            context=context.copy(),
            settings=settings,
            path=CUR_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        generator.generate_context()
        self.assertTrue(hasattr(generator.readers, "_cache"))

        generator = PagesGenerator(
            context=context.copy(),
            settings=settings,
            path=CUR_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        readers = generator.readers.readers
        for reader in readers.values():
            reader.read = MagicMock()

@@ -256,24 +325,30 @@ class TestCache(unittest.TestCase):

        used in --ignore_cache or autoreload mode"""
        settings = self._get_cache_enabled_settings()
        settings["PAGE_PATHS"] = ["TestPages"]
        settings["READERS"] = {"asc": None}
        context = get_context(settings)

        generator = PagesGenerator(
            context=context.copy(),
            settings=settings,
            path=CUR_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        generator.readers.read_file = MagicMock()
        generator.generate_context()
        self.assertTrue(hasattr(generator, "_cache_open"))
        orig_call_count = generator.readers.read_file.call_count

        settings["LOAD_CONTENT_CACHE"] = False
        generator = PagesGenerator(
            context=context.copy(),
            settings=settings,
            path=CUR_DIR,
            theme=settings["THEME"],
            output_path=None,
        )
        generator.readers.read_file = MagicMock()
        generator.generate_context()
        self.assertEqual(generator.readers.read_file.call_count, orig_call_count)
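# --- editor's note (not part of the commit) -------------------------------
# The cache settings exercised by TestCache above, as they would appear in a
# pelicanconf.py; "cache" is an arbitrary example directory.
CACHE_CONTENT = True  # write parsed content to a cache
LOAD_CONTENT_CACHE = True  # reuse it on the next run (skipped with --ignore-cache)
CACHE_PATH = "cache"  # where the cache is stored
CONTENT_CACHING_LAYER = "generator"  # cache objects; "reader" caches raw content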
@@ -5,68 +5,77 @@ from pelican import get_config, parse_arguments
class TestParseOverrides(unittest.TestCase):
    def test_flags(self):
        for flag in ["-e", "--extra-settings"]:
            args = parse_arguments([flag, "k=1"])
            self.assertDictEqual(args.overrides, {"k": 1})

    def test_parse_multiple_items(self):
        args = parse_arguments("-e k1=1 k2=2".split())
        self.assertDictEqual(args.overrides, {"k1": 1, "k2": 2})

    def test_parse_valid_json(self):
        json_values_python_values_map = {
            '""': "",
            "null": None,
            '"string"': "string",
            '["foo", 12, "4", {}]': ["foo", 12, "4", {}],
        }
        for k, v in json_values_python_values_map.items():
            args = parse_arguments(["-e", "k=" + k])
            self.assertDictEqual(args.overrides, {"k": v})

    def test_parse_invalid_syntax(self):
        invalid_items = ["k= 1", "k =1", "k", "k v"]
        for item in invalid_items:
            with self.assertRaises(ValueError):
                parse_arguments(f"-e {item}".split())

    def test_parse_invalid_json(self):
        invalid_json = {
            "",
            "False",
            "True",
            "None",
            "some other string",
            '{"foo": bar}',
            "[foo]",
        }
        for v in invalid_json:
            with self.assertRaises(ValueError):
                parse_arguments(["-e ", "k=" + v])


class TestGetConfigFromArgs(unittest.TestCase):
    def test_overrides_known_keys(self):
        args = parse_arguments(
            [
                "-e",
                "DELETE_OUTPUT_DIRECTORY=false",
                'OUTPUT_RETENTION=["1.txt"]',
                'SITENAME="Title"',
            ]
        )
        config = get_config(args)
        config_must_contain = {
            "DELETE_OUTPUT_DIRECTORY": False,
            "OUTPUT_RETENTION": ["1.txt"],
            "SITENAME": "Title",
        }
        self.assertDictEqual(config, {**config, **config_must_contain})

    def test_overrides_non_default_type(self):
        args = parse_arguments(
            [
                "-e",
                "DISPLAY_PAGES_ON_MENU=123",
                "PAGE_TRANSLATION_ID=null",
                'TRANSLATION_FEED_RSS_URL="someurl"',
            ]
        )
        config = get_config(args)
        config_must_contain = {
            "DISPLAY_PAGES_ON_MENU": 123,
            "PAGE_TRANSLATION_ID": None,
            "TRANSLATION_FEED_RSS_URL": "someurl",
        }
        self.assertDictEqual(config, {**config, **config_must_contain})
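# --- editor's note (not part of the commit) -------------------------------
# The behaviour verified above, driven directly: values passed via
# `-e`/`--extra-settings` are parsed as JSON, so strings need embedded quotes.
from pelican import get_config, parse_arguments

args = parse_arguments(["-e", "DEFAULT_PAGINATION=5", 'SITENAME="My Site"'])
config = get_config(args)
assert config["DEFAULT_PAGINATION"] == 5
assert config["SITENAME"] == "My Site"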
[File diff suppressed because it is too large]

[File diff suppressed because it is too large]

@@ -4,26 +4,35 @@ from posixpath import join as posix_join
from unittest.mock import patch

from pelican.settings import DEFAULT_CONFIG
from pelican.tests.support import (
    mute,
    skipIfNoExecutable,
    temporary_folder,
    unittest,
    TestCaseWithCLocale,
)
from pelican.tools.pelican_import import (
    blogger2fields,
    build_header,
    build_markdown_header,
    decode_wp_content,
    download_attachments,
    fields2pelican,
    get_attachments,
    tumblr2fields,
    wp2fields,
)
from pelican.utils import path_to_file_url, slugify

CUR_DIR = os.path.abspath(os.path.dirname(__file__))
BLOGGER_XML_SAMPLE = os.path.join(CUR_DIR, "content", "bloggerexport.xml")
WORDPRESS_XML_SAMPLE = os.path.join(CUR_DIR, "content", "wordpressexport.xml")
WORDPRESS_ENCODED_CONTENT_SAMPLE = os.path.join(
    CUR_DIR, "content", "wordpress_content_encoded"
)
WORDPRESS_DECODED_CONTENT_SAMPLE = os.path.join(
    CUR_DIR, "content", "wordpress_content_decoded"
)
try:
    from bs4 import BeautifulSoup

@@ -36,10 +45,9 @@ except ImportError:

    LXML = False


@skipIfNoExecutable(["pandoc", "--version"])
@unittest.skipUnless(BeautifulSoup, "Needs BeautifulSoup module")
class TestBloggerXmlImporter(TestCaseWithCLocale):
    def setUp(self):
        super().setUp()
        self.posts = blogger2fields(BLOGGER_XML_SAMPLE)
@@ -50,16 +58,17 @@ class TestBloggerXmlImporter(TestCaseWithCLocale):

        """
        test_posts = list(self.posts)
        kinds = {x[8] for x in test_posts}
        self.assertEqual({"page", "article", "comment"}, kinds)
        page_titles = {x[0] for x in test_posts if x[8] == "page"}
        self.assertEqual({"Test page", "Test page 2"}, page_titles)
        article_titles = {x[0] for x in test_posts if x[8] == "article"}
        self.assertEqual(
            {"Black as Egypt's Night", "The Steel Windpipe"}, article_titles
        )
        comment_titles = {x[0] for x in test_posts if x[8] == "comment"}
        self.assertEqual(
            {"Mishka, always a pleasure to read your " "adventures!..."}, comment_titles
        )
    def test_recognise_status_with_correct_filename(self):
        """Check that importer outputs only statuses 'published' and 'draft',

@@ -67,24 +76,25 @@ class TestBloggerXmlImporter(TestCaseWithCLocale):

        """
        test_posts = list(self.posts)
        statuses = {x[7] for x in test_posts}
        self.assertEqual({"published", "draft"}, statuses)

        draft_filenames = {x[2] for x in test_posts if x[7] == "draft"}
        # draft filenames are id-based
        self.assertEqual(
            {"page-4386962582497458967", "post-1276418104709695660"}, draft_filenames
        )

        published_filenames = {x[2] for x in test_posts if x[7] == "published"}
        # published filenames are url-based, except comments
        self.assertEqual(
            {"the-steel-windpipe", "test-page", "post-5590533389087749201"},
            published_filenames,
        )


@skipIfNoExecutable(["pandoc", "--version"])
@unittest.skipUnless(BeautifulSoup, "Needs BeautifulSoup module")
class TestWordpressXmlImporter(TestCaseWithCLocale):
    def setUp(self):
        super().setUp()
        self.posts = wp2fields(WORDPRESS_XML_SAMPLE)
@@ -92,30 +102,49 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):

    def test_ignore_empty_posts(self):
        self.assertTrue(self.posts)
        for (
            title,
            content,
            fname,
            date,
            author,
            categ,
            tags,
            status,
            kind,
            format,
        ) in self.posts:
            self.assertTrue(title.strip())

    def test_recognise_page_kind(self):
        """Check that we recognise pages in wordpress, as opposed to posts"""
        self.assertTrue(self.posts)
        # Collect (title, filename, kind) of non-empty posts recognised as page
        pages_data = []
        for (
            title,
            content,
            fname,
            date,
            author,
            categ,
            tags,
            status,
            kind,
            format,
        ) in self.posts:
            if kind == "page":
                pages_data.append((title, fname))
        self.assertEqual(2, len(pages_data))
        self.assertEqual(("Page", "contact"), pages_data[0])
        self.assertEqual(("Empty Page", "empty"), pages_data[1])

    def test_dirpage_directive_for_page_kind(self):
        silent_f2p = mute(True)(fields2pelican)
        test_post = filter(lambda p: p[0].startswith("Empty Page"), self.posts)
        with temporary_folder() as temp:
            fname = list(silent_f2p(test_post, "markdown", temp, dirpage=True))[0]
            self.assertTrue(fname.endswith("pages%sempty.md" % os.path.sep))

    def test_dircat(self):
        silent_f2p = mute(True)(fields2pelican)
@@ -125,14 +154,13 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):

            if len(post[5]) > 0:  # Has a category
                test_posts.append(post)
        with temporary_folder() as temp:
            fnames = list(silent_f2p(test_posts, "markdown", temp, dircat=True))
            subs = DEFAULT_CONFIG["SLUG_REGEX_SUBSTITUTIONS"]
            index = 0
            for post in test_posts:
                name = post[2]
                category = slugify(post[5][0], regex_subs=subs, preserve_case=True)
                name += ".md"
                filename = os.path.join(category, name)
                out_name = fnames[index]
                self.assertTrue(out_name.endswith(filename))
@@ -141,9 +169,19 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):

    def test_unless_custom_post_all_items_should_be_pages_or_posts(self):
        self.assertTrue(self.posts)
        pages_data = []
        for (
            title,
            content,
            fname,
            date,
            author,
            categ,
            tags,
            status,
            kind,
            format,
        ) in self.posts:
            if kind == "page" or kind == "article":
                pass
            else:
                pages_data.append((title, fname))
@@ -152,40 +190,45 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):

    def test_recognise_custom_post_type(self):
        self.assertTrue(self.custposts)
        cust_data = []
        for (
            title,
            content,
            fname,
            date,
            author,
            categ,
            tags,
            status,
            kind,
            format,
        ) in self.custposts:
            if kind == "article" or kind == "page":
                pass
            else:
                cust_data.append((title, kind))
        self.assertEqual(3, len(cust_data))
        self.assertEqual(("A custom post in category 4", "custom1"), cust_data[0])
        self.assertEqual(("A custom post in category 5", "custom1"), cust_data[1])
        self.assertEqual(
            ("A 2nd custom post type also in category 5", "custom2"), cust_data[2]
        )

    def test_custom_posts_put_in_own_dir(self):
        silent_f2p = mute(True)(fields2pelican)
        test_posts = []
        for post in self.custposts:
            # check post kind
            if post[8] == "article" or post[8] == "page":
                pass
            else:
                test_posts.append(post)
        with temporary_folder() as temp:
            fnames = list(silent_f2p(test_posts, "markdown", temp, wp_custpost=True))
            index = 0
            for post in test_posts:
                name = post[2]
                kind = post[8]
                name += ".md"
                filename = os.path.join(kind, name)
                out_name = fnames[index]
                self.assertTrue(out_name.endswith(filename))
@@ -196,20 +239,21 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):

        test_posts = []
        for post in self.custposts:
            # check post kind
            if post[8] == "article" or post[8] == "page":
                pass
            else:
                test_posts.append(post)
        with temporary_folder() as temp:
            fnames = list(
                silent_f2p(test_posts, "markdown", temp, wp_custpost=True, dircat=True)
            )
            subs = DEFAULT_CONFIG["SLUG_REGEX_SUBSTITUTIONS"]
            index = 0
            for post in test_posts:
                name = post[2]
                kind = post[8]
                category = slugify(post[5][0], regex_subs=subs, preserve_case=True)
                name += ".md"
                filename = os.path.join(kind, category, name)
                out_name = fnames[index]
                self.assertTrue(out_name.endswith(filename))
@@ -221,16 +265,19 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):

        test_posts = []
        for post in self.custposts:
            # check post kind
            if post[8] == "page":
                test_posts.append(post)
        with temporary_folder() as temp:
            fnames = list(
                silent_f2p(
                    test_posts, "markdown", temp, wp_custpost=True, dirpage=False
                )
            )
            index = 0
            for post in test_posts:
                name = post[2]
                name += ".md"
                filename = os.path.join("pages", name)
                out_name = fnames[index]
                self.assertFalse(out_name.endswith(filename))
@@ -238,117 +285,114 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):

        test_posts = list(self.posts)

        def r(f):
            with open(f, encoding="utf-8") as infile:
                return infile.read()

        silent_f2p = mute(True)(fields2pelican)
        with temporary_folder() as temp:
            rst_files = (r(f) for f in silent_f2p(test_posts, "markdown", temp))
            self.assertTrue(any("<iframe" in rst for rst in rst_files))
            rst_files = (
                r(f) for f in silent_f2p(test_posts, "markdown", temp, strip_raw=True)
            )
            self.assertFalse(any("<iframe" in rst for rst in rst_files))
            # no effect in rst
            rst_files = (r(f) for f in silent_f2p(test_posts, "rst", temp))
            self.assertFalse(any("<iframe" in rst for rst in rst_files))
            rst_files = (
                r(f) for f in silent_f2p(test_posts, "rst", temp, strip_raw=True)
            )
            self.assertFalse(any("<iframe" in rst for rst in rst_files))
    def test_decode_html_entities_in_titles(self):
        test_posts = [post for post in self.posts if post[2] == "html-entity-test"]
        self.assertEqual(len(test_posts), 1)

        post = test_posts[0]
        title = post[0]
        self.assertTrue(
            title,
            "A normal post with some <html> entities in "
            "the title. You can't miss them.",
        )
        self.assertNotIn("&", title)
    def test_decode_wp_content_returns_empty(self):
        """Check that given an empty string we return an empty string."""
        self.assertEqual(decode_wp_content(""), "")

    def test_decode_wp_content(self):
        """Check that we can decode a wordpress content string."""
        with open(WORDPRESS_ENCODED_CONTENT_SAMPLE) as encoded_file:
            encoded_content = encoded_file.read()
        with open(WORDPRESS_DECODED_CONTENT_SAMPLE) as decoded_file:
            decoded_content = decoded_file.read()
            self.assertEqual(
                decode_wp_content(encoded_content, br=False), decoded_content
            )
    def test_preserve_verbatim_formatting(self):
        def r(f):
            with open(f, encoding="utf-8") as infile:
                return infile.read()

        silent_f2p = mute(True)(fields2pelican)
        test_post = filter(lambda p: p[0].startswith("Code in List"), self.posts)
        with temporary_folder() as temp:
            md = [r(f) for f in silent_f2p(test_post, "markdown", temp)][0]
            self.assertTrue(re.search(r"\s+a = \[1, 2, 3\]", md))
            self.assertTrue(re.search(r"\s+b = \[4, 5, 6\]", md))

            for_line = re.search(r"\s+for i in zip\(a, b\):", md).group(0)
            print_line = re.search(r"\s+print i", md).group(0)
            self.assertTrue(for_line.rindex("for") < print_line.rindex("print"))
    def test_code_in_list(self):
        def r(f):
            with open(f, encoding="utf-8") as infile:
                return infile.read()

        silent_f2p = mute(True)(fields2pelican)
        test_post = filter(lambda p: p[0].startswith("Code in List"), self.posts)
        with temporary_folder() as temp:
            md = [r(f) for f in silent_f2p(test_post, "markdown", temp)][0]
            sample_line = re.search(r"- This is a code sample", md).group(0)
            code_line = re.search(r"\s+a = \[1, 2, 3\]", md).group(0)
            self.assertTrue(sample_line.rindex("This") < code_line.rindex("a"))
    def test_dont_use_smart_quotes(self):
        def r(f):
            with open(f, encoding="utf-8") as infile:
                return infile.read()

        silent_f2p = mute(True)(fields2pelican)
        test_post = filter(lambda p: p[0].startswith("Post with raw data"), self.posts)
        with temporary_folder() as temp:
            md = [r(f) for f in silent_f2p(test_post, "markdown", temp)][0]
            escaped_quotes = re.search(r'\\[\'"“”‘’]', md)
            self.assertFalse(escaped_quotes)
    def test_convert_caption_to_figure(self):
        def r(f):
            with open(f, encoding="utf-8") as infile:
                return infile.read()

        silent_f2p = mute(True)(fields2pelican)
        test_post = filter(lambda p: p[0].startswith("Caption on image"), self.posts)
        with temporary_folder() as temp:
            md = [r(f) for f in silent_f2p(test_post, "markdown", temp)][0]
            caption = re.search(r"\[caption", md)
            self.assertFalse(caption)

            for occurence in [
                "/theme/img/xpelican.png.pagespeed.ic.Rjep0025-y.png",
                "/theme/img/xpelican-3.png.pagespeed.ic.m-NAIdRCOM.png",
                "/theme/img/xpelican.png.pagespeed.ic.Rjep0025-y.png",
                "This is a pelican",
                "This also a pelican",
                "Yet another pelican",
            ]:
                # pandoc 2.x converts into ![text](src)
                # pandoc 3.x converts into <figure>src<figcaption>text</figcaption></figure>
@@ -357,70 +401,97 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):

class TestBuildHeader(unittest.TestCase):
    def test_build_header(self):
        header = build_header("test", None, None, None, None, None)
        self.assertEqual(header, "test\n####\n\n")

    def test_build_header_with_fields(self):
        header_data = [
            "Test Post",
            "2014-11-04",
            "Alexis Métaireau",
            ["Programming"],
            ["Pelican", "Python"],
            "test-post",
        ]

        expected_docutils = "\n".join(
            [
                "Test Post",
                "#########",
                ":date: 2014-11-04",
                ":author: Alexis Métaireau",
                ":category: Programming",
                ":tags: Pelican, Python",
                ":slug: test-post",
                "\n",
            ]
        )

        expected_md = "\n".join(
            [
                "Title: Test Post",
                "Date: 2014-11-04",
                "Author: Alexis Métaireau",
                "Category: Programming",
                "Tags: Pelican, Python",
                "Slug: test-post",
                "\n",
            ]
        )

        self.assertEqual(build_header(*header_data), expected_docutils)
        self.assertEqual(build_markdown_header(*header_data), expected_md)

    def test_build_header_with_east_asian_characters(self):
        header = build_header(
            "これは広い幅の文字だけで構成されたタイトルです",
            None,
            None,
            None,
            None,
            None,
        )

        self.assertEqual(
            header,
            (
                "これは広い幅の文字だけで構成されたタイトルです\n"
                "##############################################"
                "\n\n"
            ),
        )

    def test_galleries_added_to_header(self):
        header = build_header(
            "test",
            None,
            None,
            None,
            None,
            None,
            attachments=["output/test1", "output/test2"],
        )
        self.assertEqual(
            header, ("test\n####\n" ":attachments: output/test1, " "output/test2\n\n")
        )

    def test_galleries_added_to_markdown_header(self):
        header = build_markdown_header(
            "test",
            None,
            None,
            None,
            None,
            None,
            attachments=["output/test1", "output/test2"],
        )
        self.assertEqual(
            header, "Title: test\nAttachments: output/test1, output/test2\n\n"
        )
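# --- editor's note (not part of the commit) -------------------------------
# For reference, the minimal output pinned down by the first test above:
from pelican.tools.pelican_import import build_header

print(repr(build_header("test", None, None, None, None, None)))
# 'test\n####\n\n' -- a reST title with an underline padded to its width,
# which is why the East Asian title gets a 46-character underline.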
@unittest.skipUnless(BeautifulSoup, "Needs BeautifulSoup module")
@unittest.skipUnless(LXML, "Needs lxml module")
class TestWordpressXMLAttachements(TestCaseWithCLocale):
    def setUp(self):
        super().setUp()
@ -435,38 +506,45 @@ class TestWordpressXMLAttachements(TestCaseWithCLocale):
for post in self.attachments.keys(): for post in self.attachments.keys():
if post is None: if post is None:
expected = { expected = {
('https://upload.wikimedia.org/wikipedia/commons/' (
'thumb/2/2c/Pelican_lakes_entrance02.jpg/' "https://upload.wikimedia.org/wikipedia/commons/"
'240px-Pelican_lakes_entrance02.jpg') "thumb/2/2c/Pelican_lakes_entrance02.jpg/"
"240px-Pelican_lakes_entrance02.jpg"
)
} }
self.assertEqual(self.attachments[post], expected) self.assertEqual(self.attachments[post], expected)
elif post == 'with-excerpt': elif post == "with-excerpt":
expected_invalid = ('http://thisurlisinvalid.notarealdomain/' expected_invalid = (
'not_an_image.jpg') "http://thisurlisinvalid.notarealdomain/" "not_an_image.jpg"
expected_pelikan = ('http://en.wikipedia.org/wiki/' )
'File:Pelikan_Walvis_Bay.jpg') expected_pelikan = (
self.assertEqual(self.attachments[post], "http://en.wikipedia.org/wiki/" "File:Pelikan_Walvis_Bay.jpg"
{expected_invalid, expected_pelikan}) )
elif post == 'with-tags': self.assertEqual(
expected_invalid = ('http://thisurlisinvalid.notarealdomain') self.attachments[post], {expected_invalid, expected_pelikan}
)
elif post == "with-tags":
expected_invalid = "http://thisurlisinvalid.notarealdomain"
self.assertEqual(self.attachments[post], {expected_invalid}) self.assertEqual(self.attachments[post], {expected_invalid})
else: else:
self.fail('all attachments should match to a ' self.fail(
'filename or None, {}' "all attachments should match to a " "filename or None, {}".format(
.format(post)) post
)
)
def test_download_attachments(self): def test_download_attachments(self):
real_file = os.path.join(CUR_DIR, 'content/article.rst') real_file = os.path.join(CUR_DIR, "content/article.rst")
good_url = path_to_file_url(real_file) good_url = path_to_file_url(real_file)
bad_url = 'http://localhost:1/not_a_file.txt' bad_url = "http://localhost:1/not_a_file.txt"
silent_da = mute()(download_attachments) silent_da = mute()(download_attachments)
with temporary_folder() as temp: with temporary_folder() as temp:
locations = list(silent_da(temp, [good_url, bad_url])) locations = list(silent_da(temp, [good_url, bad_url]))
self.assertEqual(1, len(locations)) self.assertEqual(1, len(locations))
directory = locations[0] directory = locations[0]
self.assertTrue( self.assertTrue(
directory.endswith(posix_join('content', 'article.rst')), directory.endswith(posix_join("content", "article.rst")), directory
directory) )
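The `path_to_file_url` helper above turns a local path into a `file://` URL so `download_attachments` can fetch it like any remote resource. The standard library offers the same conversion, for comparison:

# Stdlib equivalent of building a file:// URL from a local path (POSIX shown).
from pathlib import Path

url = Path("/tmp/article.rst").absolute().as_uri()
print(url)  # file:///tmp/article.rst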
class TestTumblrImporter(TestCaseWithCLocale):

@@ -484,32 +562,42 @@ class TestTumblrImporter(TestCaseWithCLocale):
                    "timestamp": 1573162000,
                    "format": "html",
                    "slug": "a-slug",
                    "tags": ["economics"],
                    "state": "published",
                    "photos": [
                        {
                            "caption": "",
                            "original_size": {
                                "url": "https://..fccdc2360ba7182a.jpg",
                                "width": 634,
                                "height": 789,
                            },
                        }
                    ],
                }
            ]

        get.side_effect = get_posts

        posts = list(tumblr2fields("api_key", "blogname"))
        self.assertEqual(
            [
                (
                    "Photo",
                    '<img alt="" src="https://..fccdc2360ba7182a.jpg" />\n',
                    "2019-11-07-a-slug",
                    "2019-11-07 21:26:40+0000",
                    "testy",
                    ["photo"],
                    ["economics"],
                    "published",
                    "article",
                    "html",
                )
            ],
            posts,
            posts,
        )

    @patch("pelican.tools.pelican_import._get_tumblr_posts")
    def test_video_embed(self, get):

@@ -531,40 +619,39 @@ class TestTumblrImporter(TestCaseWithCLocale):
                    "source_title": "youtube.com",
                    "caption": "<p>Caption</p>",
                    "player": [
                        {"width": 250, "embed_code": "<iframe>1</iframe>"},
                        {"width": 400, "embed_code": "<iframe>2</iframe>"},
                        {"width": 500, "embed_code": "<iframe>3</iframe>"},
                    ],
                    "video_type": "youtube",
                }
            ]

        get.side_effect = get_posts

        posts = list(tumblr2fields("api_key", "blogname"))
        self.assertEqual(
            [
                (
                    "youtube.com",
                    '<p><a href="https://href.li/?'
                    'https://www.youtube.com/a">via</a></p>\n<p>Caption</p>'
                    "<iframe>1</iframe>\n"
                    "<iframe>2</iframe>\n"
                    "<iframe>3</iframe>\n",
                    "2017-07-07-the-slug",
                    "2017-07-07 20:31:41+0000",
                    "testy",
                    ["video"],
                    [],
                    "published",
                    "article",
                    "html",
                )
            ],
            posts,
            posts,
        )

    @patch("pelican.tools.pelican_import._get_tumblr_posts")
    def test_broken_video_embed(self, get):

@@ -581,42 +668,43 @@ class TestTumblrImporter(TestCaseWithCLocale):
                    "timestamp": 1471192655,
                    "state": "published",
                    "format": "html",
                    "tags": ["interviews"],
                    "source_url": "https://href.li/?https://www.youtube.com/watch?v=b",
                    "source_title": "youtube.com",
                    "caption": "<p>Caption</p>",
                    "player": [
                        {
                            "width": 250,
                            # If video is gone, embed_code is False
                            "embed_code": False,
                        },
                        {"width": 400, "embed_code": False},
                        {"width": 500, "embed_code": False},
                    ],
                    "video_type": "youtube",
                }
            ]

        get.side_effect = get_posts

        posts = list(tumblr2fields("api_key", "blogname"))
        self.assertEqual(
            [
                (
                    "youtube.com",
                    '<p><a href="https://href.li/?https://www.youtube.com/watch?'
                    'v=b">via</a></p>\n<p>Caption</p>'
                    "<p>(This video isn't available anymore.)</p>\n",
                    "2016-08-14-the-slug",
                    "2016-08-14 16:37:35+0000",
                    "testy",
                    ["video"],
                    ["interviews"],
                    "published",
                    "article",
                    "html",
                )
            ],
            posts,
            posts,
        )
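As the three assertions above all show, tumblr2fields yields one tuple per post. The field order below is taken directly from the expected values in these tests (the sample tuple is copied from the photo test, so nothing here is invented beyond the variable names):

# Field order as asserted by the tests above.
post = (
    "Photo",
    '<img alt="" src="https://..fccdc2360ba7182a.jpg" />\n',
    "2019-11-07-a-slug",
    "2019-11-07 21:26:40+0000",
    "testy",
    ["photo"],
    ["economics"],
    "published",
    "article",
    "html",
)
title, content, slug, date, author, kind, tags, status, post_type, markup = post
print(f"{date} {slug}: {title} ({status} {post_type}, markup={markup})")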


@@ -35,48 +35,41 @@ class TestLog(unittest.TestCase):
    def test_log_filter(self):
        def do_logging():
            for i in range(5):
                self.logger.warning("Log %s", i)
                self.logger.warning("Another log %s", i)

        # no filter
        with self.reset_logger():
            do_logging()
            self.assertEqual(self.handler.count_logs("Log \\d", logging.WARNING), 5)
            self.assertEqual(
                self.handler.count_logs("Another log \\d", logging.WARNING), 5
            )

        # filter by template
        with self.reset_logger():
            log.LimitFilter._ignore.add((logging.WARNING, "Log %s"))
            do_logging()
            self.assertEqual(self.handler.count_logs("Log \\d", logging.WARNING), 0)
            self.assertEqual(
                self.handler.count_logs("Another log \\d", logging.WARNING), 5
            )

        # filter by exact message
        with self.reset_logger():
            log.LimitFilter._ignore.add((logging.WARNING, "Log 3"))
            do_logging()
            self.assertEqual(self.handler.count_logs("Log \\d", logging.WARNING), 4)
            self.assertEqual(
                self.handler.count_logs("Another log \\d", logging.WARNING), 5
            )

        # filter by both
        with self.reset_logger():
            log.LimitFilter._ignore.add((logging.WARNING, "Log 3"))
            log.LimitFilter._ignore.add((logging.WARNING, "Another log %s"))
            do_logging()
            self.assertEqual(self.handler.count_logs("Log \\d", logging.WARNING), 4)
            self.assertEqual(
                self.handler.count_logs("Another log \\d", logging.WARNING), 0
            )
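The test above relies on log.LimitFilter._ignore accepting (level, template) pairs, where the template may be either the raw "%s" format string or a fully rendered message. A rough stdlib-only reimplementation of that matching rule, as an illustration of the mechanism rather than Pelican's actual code:

import logging

class IgnoreFilter(logging.Filter):
    """Drop records whose (level, template) or (level, message) is ignored."""

    _ignore = {(logging.WARNING, "Log %s"), (logging.WARNING, "Log 3")}

    def filter(self, record):
        # Match on the unformatted template first, then the rendered message.
        if (record.levelno, record.msg) in self._ignore:
            return False
        return (record.levelno, record.getMessage()) not in self._ignore

logger = logging.getLogger("demo")
logger.addFilter(IgnoreFilter())
logger.warning("Log %s", 3)      # suppressed: the "%s" template is ignored
logger.warning("Another log 3")  # passes through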


@@ -17,17 +17,17 @@ class TestPage(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.old_locale = locale.setlocale(locale.LC_ALL)
        locale.setlocale(locale.LC_ALL, "C")
        self.page_kwargs = {
            "content": TEST_CONTENT,
            "context": {
                "localsiteurl": "",
            },
            "metadata": {
                "summary": TEST_SUMMARY,
                "title": "foo bar",
            },
            "source_path": "/path/to/file/foo.ext",
        }

    def tearDown(self):

@@ -37,68 +37,79 @@ class TestPage(unittest.TestCase):
        settings = get_settings()
        # fix up pagination rules
        from pelican.paginator import PaginationRule

        pagination_rules = [
            PaginationRule(*r)
            for r in settings.get(
                "PAGINATION_PATTERNS",
                DEFAULT_CONFIG["PAGINATION_PATTERNS"],
            )
        ]
        settings["PAGINATION_PATTERNS"] = sorted(
            pagination_rules,
            key=lambda r: r[0],
        )

        self.page_kwargs["metadata"]["author"] = Author("Blogger", settings)
        object_list = [Article(**self.page_kwargs), Article(**self.page_kwargs)]
        paginator = Paginator("foobar.foo", "foobar/foo", object_list, settings)
        page = paginator.page(1)
        self.assertEqual(page.save_as, "foobar.foo")

    def test_custom_pagination_pattern(self):
        from pelican.paginator import PaginationRule

        settings = get_settings()
        settings["PAGINATION_PATTERNS"] = [
            PaginationRule(*r)
            for r in [
                (1, "/{url}", "{base_name}/index.html"),
                (2, "/{url}{number}/", "{base_name}/{number}/index.html"),
            ]
        ]

        self.page_kwargs["metadata"]["author"] = Author("Blogger", settings)
        object_list = [Article(**self.page_kwargs), Article(**self.page_kwargs)]
        paginator = Paginator(
            "blog/index.html", "//blog.my.site/", object_list, settings, 1
        )
        # The URL *has to* stay absolute (with // in the front), so verify that
        page1 = paginator.page(1)
        self.assertEqual(page1.save_as, "blog/index.html")
        self.assertEqual(page1.url, "//blog.my.site/")
        page2 = paginator.page(2)
        self.assertEqual(page2.save_as, "blog/2/index.html")
        self.assertEqual(page2.url, "//blog.my.site/2/")

    def test_custom_pagination_pattern_last_page(self):
        from pelican.paginator import PaginationRule

        settings = get_settings()
        settings["PAGINATION_PATTERNS"] = [
            PaginationRule(*r)
            for r in [
                (1, "/{url}1/", "{base_name}/1/index.html"),
                (2, "/{url}{number}/", "{base_name}/{number}/index.html"),
                (-1, "/{url}", "{base_name}/index.html"),
            ]
        ]

        self.page_kwargs["metadata"]["author"] = Author("Blogger", settings)
        object_list = [
            Article(**self.page_kwargs),
            Article(**self.page_kwargs),
            Article(**self.page_kwargs),
        ]
        paginator = Paginator(
            "blog/index.html", "//blog.my.site/", object_list, settings, 1
        )
        # The URL *has to* stay absolute (with // in the front), so verify that
        page1 = paginator.page(1)
        self.assertEqual(page1.save_as, "blog/1/index.html")
        self.assertEqual(page1.url, "//blog.my.site/1/")
        page2 = paginator.page(2)
        self.assertEqual(page2.save_as, "blog/2/index.html")
        self.assertEqual(page2.url, "//blog.my.site/2/")
        page3 = paginator.page(3)
        self.assertEqual(page3.save_as, "blog/index.html")
        self.assertEqual(page3.url, "//blog.my.site/")
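Both tests above encode the same contract: each PaginationRule is a (min_page, URL, SAVE_AS) triple, the rule with the largest min_page not exceeding the current page number applies, and -1 reads as "last page only". A small sketch of one plausible selection logic under those assumptions (illustrative, not Pelican's implementation):

# Illustrative rule lookup mirroring the behaviour the tests assert,
# assuming min_page == -1 means "applies to the last page only".
def pick_rule(rules, page, num_pages):
    chosen = None
    for min_page, url, save_as in rules:
        if min_page == -1 and page == num_pages:
            return (min_page, url, save_as)  # last-page rule wins outright
        if min_page != -1 and page >= min_page:
            if chosen is None or min_page > chosen[0]:
                chosen = (min_page, url, save_as)
    return chosen

rules = [
    (1, "/{url}1/", "{base_name}/1/index.html"),
    (2, "/{url}{number}/", "{base_name}/{number}/index.html"),
    (-1, "/{url}", "{base_name}/index.html"),
]
print(pick_rule(rules, 3, 3))  # last page -> the bare "/{url}" rule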


@@ -20,9 +20,10 @@ from pelican.tests.support import (
)

CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
SAMPLES_PATH = os.path.abspath(
    os.path.join(CURRENT_DIR, os.pardir, os.pardir, "samples")
)
OUTPUT_PATH = os.path.abspath(os.path.join(CURRENT_DIR, "output"))

INPUT_PATH = os.path.join(SAMPLES_PATH, "content")
SAMPLE_CONFIG = os.path.join(SAMPLES_PATH, "pelican.conf.py")

@@ -31,9 +32,9 @@ SAMPLE_FR_CONFIG = os.path.join(SAMPLES_PATH, "pelican.conf_FR.py")
def recursiveDiff(dcmp):
    diff = {
        "diff_files": [os.path.join(dcmp.right, f) for f in dcmp.diff_files],
        "left_only": [os.path.join(dcmp.right, f) for f in dcmp.left_only],
        "right_only": [os.path.join(dcmp.right, f) for f in dcmp.right_only],
    }
    for sub_dcmp in dcmp.subdirs.values():
        for k, v in recursiveDiff(sub_dcmp).items():

@@ -47,11 +48,11 @@ class TestPelican(LoggedTestCase):
    def setUp(self):
        super().setUp()
        self.temp_path = mkdtemp(prefix="pelicantests.")
        self.temp_cache = mkdtemp(prefix="pelican_cache.")
        self.maxDiff = None
        self.old_locale = locale.setlocale(locale.LC_ALL)
        locale.setlocale(locale.LC_ALL, "C")

    def tearDown(self):
        read_settings()  # cleanup PYGMENTS_RST_OPTIONS

@@ -70,8 +71,8 @@ class TestPelican(LoggedTestCase):
        if proc.returncode != 0:
            msg = self._formatMessage(
                msg,
                "%s and %s differ:\nstdout:\n%s\nstderr\n%s"
                % (left_path, right_path, out, err),
            )
            raise self.failureException(msg)

@@ -85,136 +86,154 @@ class TestPelican(LoggedTestCase):
        self.assertTrue(
            generator_classes[-1] is StaticGenerator,
            "StaticGenerator must be the last generator, but it isn't!",
        )
        self.assertIsInstance(
            generator_classes,
            Sequence,
            "_get_generator_classes() must return a Sequence to preserve order",
        )

    @skipIfNoExecutable(["git", "--version"])
    def test_basic_generation_works(self):
        # when running pelican without settings, it should pick up the default
        # ones and generate correct output without raising any exception
        settings = read_settings(
            path=None,
            override={
                "PATH": INPUT_PATH,
                "OUTPUT_PATH": self.temp_path,
                "CACHE_PATH": self.temp_cache,
                "LOCALE": locale.normalize("en_US"),
            },
        )
        pelican = Pelican(settings=settings)
        mute(True)(pelican.run)()
        self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, "basic"))
        self.assertLogCountEqual(
            count=1,
            msg="Unable to find.*skipping url replacement",
            level=logging.WARNING,
        )

    @skipIfNoExecutable(["git", "--version"])
    def test_custom_generation_works(self):
        # the same thing with a specified set of settings should work
        settings = read_settings(
            path=SAMPLE_CONFIG,
            override={
                "PATH": INPUT_PATH,
                "OUTPUT_PATH": self.temp_path,
                "CACHE_PATH": self.temp_cache,
                "LOCALE": locale.normalize("en_US.UTF-8"),
            },
        )
        pelican = Pelican(settings=settings)
        mute(True)(pelican.run)()
        self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, "custom"))

    @skipIfNoExecutable(["git", "--version"])
    @unittest.skipUnless(
        locale_available("fr_FR.UTF-8") or locale_available("French"),
        "French locale needed",
    )
    def test_custom_locale_generation_works(self):
        """Test that generation with fr_FR.UTF-8 locale works"""
        if sys.platform == "win32":
            our_locale = "French"
        else:
            our_locale = "fr_FR.UTF-8"

        settings = read_settings(
            path=SAMPLE_FR_CONFIG,
            override={
                "PATH": INPUT_PATH,
                "OUTPUT_PATH": self.temp_path,
                "CACHE_PATH": self.temp_cache,
                "LOCALE": our_locale,
            },
        )
        pelican = Pelican(settings=settings)
        mute(True)(pelican.run)()
        self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, "custom_locale"))

    def test_theme_static_paths_copy(self):
        # the same thing with a specified set of settings should work
        settings = read_settings(
            path=SAMPLE_CONFIG,
            override={
                "PATH": INPUT_PATH,
                "OUTPUT_PATH": self.temp_path,
                "CACHE_PATH": self.temp_cache,
                "THEME_STATIC_PATHS": [
                    os.path.join(SAMPLES_PATH, "very"),
                    os.path.join(SAMPLES_PATH, "kinda"),
                    os.path.join(SAMPLES_PATH, "theme_standard"),
                ],
            },
        )
        pelican = Pelican(settings=settings)
        mute(True)(pelican.run)()
        theme_output = os.path.join(self.temp_path, "theme")
        extra_path = os.path.join(theme_output, "exciting", "new", "files")

        for file in ["a_stylesheet", "a_template"]:
            self.assertTrue(os.path.exists(os.path.join(theme_output, file)))

        for file in ["wow!", "boom!", "bap!", "zap!"]:
            self.assertTrue(os.path.exists(os.path.join(extra_path, file)))

    def test_theme_static_paths_copy_single_file(self):
        # the same thing with a specified set of settings should work
        settings = read_settings(
            path=SAMPLE_CONFIG,
            override={
                "PATH": INPUT_PATH,
                "OUTPUT_PATH": self.temp_path,
                "CACHE_PATH": self.temp_cache,
                "THEME_STATIC_PATHS": [os.path.join(SAMPLES_PATH, "theme_standard")],
            },
        )
        pelican = Pelican(settings=settings)
        mute(True)(pelican.run)()
        theme_output = os.path.join(self.temp_path, "theme")

        for file in ["a_stylesheet", "a_template"]:
            self.assertTrue(os.path.exists(os.path.join(theme_output, file)))

    def test_write_only_selected(self):
        """Test that only the selected files are written"""
        settings = read_settings(
            path=None,
            override={
                "PATH": INPUT_PATH,
                "OUTPUT_PATH": self.temp_path,
                "CACHE_PATH": self.temp_cache,
                "WRITE_SELECTED": [
                    os.path.join(self.temp_path, "oh-yeah.html"),
                    os.path.join(self.temp_path, "categories.html"),
                ],
                "LOCALE": locale.normalize("en_US"),
            },
        )
        pelican = Pelican(settings=settings)
        logger = logging.getLogger()
        orig_level = logger.getEffectiveLevel()
        logger.setLevel(logging.INFO)
        mute(True)(pelican.run)()
        logger.setLevel(orig_level)
        self.assertLogCountEqual(count=2, msg="Writing .*", level=logging.INFO)

    def test_cyclic_intersite_links_no_warnings(self):
        settings = read_settings(
            path=None,
            override={
                "PATH": os.path.join(CURRENT_DIR, "cyclic_intersite_links"),
                "OUTPUT_PATH": self.temp_path,
                "CACHE_PATH": self.temp_cache,
            },
        )
        pelican = Pelican(settings=settings)
        mute(True)(pelican.run)()
        # There are four different intersite links:

@@ -230,41 +249,48 @@ class TestPelican(LoggedTestCase):
        self.assertLogCountEqual(
            count=1,
            msg="Unable to find '.*\\.rst', skipping url replacement.",
            level=logging.WARNING,
        )

    def test_md_extensions_deprecation(self):
        """Test that a warning is issued if MD_EXTENSIONS is used"""
        settings = read_settings(
            path=None,
            override={
                "PATH": INPUT_PATH,
                "OUTPUT_PATH": self.temp_path,
                "CACHE_PATH": self.temp_cache,
                "MD_EXTENSIONS": {},
            },
        )
        pelican = Pelican(settings=settings)
        mute(True)(pelican.run)()
        self.assertLogCountEqual(
            count=1,
            msg="MD_EXTENSIONS is deprecated use MARKDOWN instead.",
            level=logging.WARNING,
        )

    def test_parse_errors(self):
        # Verify that just an error is printed and the application doesn't
        # abort, exit or something.
        settings = read_settings(
            path=None,
            override={
                "PATH": os.path.abspath(os.path.join(CURRENT_DIR, "parse_error")),
                "OUTPUT_PATH": self.temp_path,
                "CACHE_PATH": self.temp_cache,
            },
        )
        pelican = Pelican(settings=settings)
        mute(True)(pelican.run)()
        self.assertLogCountEqual(
            count=1, msg="Could not process .*parse_error.rst", level=logging.ERROR
        )

    def test_module_load(self):
        """Test loading via python -m pelican --help displays the help"""
        output = subprocess.check_output(
            [sys.executable, "-m", "pelican", "--help"]
        ).decode("ascii", "replace")
        assert "usage:" in output
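The pattern repeated throughout this class, read_settings(path, override=...) followed by Pelican(settings).run(), is also the usual way to drive a build programmatically. A minimal sketch; the "content" and "output" paths are placeholders and must exist:

# Minimal programmatic build using the same API the tests exercise.
from pelican import Pelican
from pelican.settings import read_settings

settings = read_settings(
    path=None,  # no pelicanconf.py: start from the defaults
    override={"PATH": "content", "OUTPUT_PATH": "output"},
)
Pelican(settings=settings).run()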


@@ -2,27 +2,26 @@ import os
from contextlib import contextmanager

import pelican.tests.dummy_plugins.normal_plugin.normal_plugin as normal_plugin
from pelican.plugins._utils import get_namespace_plugins, get_plugin_name, load_plugins
from pelican.tests.support import unittest


@contextmanager
def tmp_namespace_path(path):
    """Context manager for temporarily appending namespace plugin packages

    path: path containing the `pelican` folder

    This modifies the `pelican.__path__` and lets the `pelican.plugins`
    namespace package resolve it from that.
    """
    # This avoids calls to internal `pelican.plugins.__path__._recalculate()`
    # as it should not be necessary
    import pelican

    old_path = pelican.__path__[:]
    try:
        pelican.__path__.append(os.path.join(path, "pelican"))
        yield
    finally:
        pelican.__path__ = old_path

@@ -30,38 +29,38 @@ def tmp_namespace_path(path):
class PluginTest(unittest.TestCase):
    _PLUGIN_FOLDER = os.path.join(
        os.path.abspath(os.path.dirname(__file__)), "dummy_plugins"
    )
    _NS_PLUGIN_FOLDER = os.path.join(_PLUGIN_FOLDER, "namespace_plugin")
    _NORMAL_PLUGIN_FOLDER = os.path.join(_PLUGIN_FOLDER, "normal_plugin")

    def test_namespace_path_modification(self):
        import pelican
        import pelican.plugins

        old_path = pelican.__path__[:]

        # not existing path
        path = os.path.join(self._PLUGIN_FOLDER, "foo")
        with tmp_namespace_path(path):
            self.assertIn(os.path.join(path, "pelican"), pelican.__path__)
            # foo/pelican does not exist, so it won't propagate
            self.assertNotIn(
                os.path.join(path, "pelican", "plugins"), pelican.plugins.__path__
            )
        # verify that we restored path back
        self.assertEqual(pelican.__path__, old_path)

        # existing path
        with tmp_namespace_path(self._NS_PLUGIN_FOLDER):
            self.assertIn(
                os.path.join(self._NS_PLUGIN_FOLDER, "pelican"), pelican.__path__
            )
            # /namespace_plugin/pelican exists, so it should be in
            self.assertIn(
                os.path.join(self._NS_PLUGIN_FOLDER, "pelican", "plugins"),
                pelican.plugins.__path__,
            )
        self.assertEqual(pelican.__path__, old_path)

    def test_get_namespace_plugins(self):

@@ -71,11 +70,11 @@ class PluginTest(unittest.TestCase):
        # with plugin
        with tmp_namespace_path(self._NS_PLUGIN_FOLDER):
            ns_plugins = get_namespace_plugins()
            self.assertEqual(len(ns_plugins), len(existing_ns_plugins) + 1)
            self.assertIn("pelican.plugins.ns_plugin", ns_plugins)
            self.assertEqual(
                ns_plugins["pelican.plugins.ns_plugin"].NAME, "namespace plugin"
            )

        # should be back to existing namespace plugins outside `with`
        ns_plugins = get_namespace_plugins()

@@ -91,15 +90,14 @@ class PluginTest(unittest.TestCase):
        with tmp_namespace_path(self._NS_PLUGIN_FOLDER):
            # with no `PLUGINS` setting, load namespace plugins
            plugins = load_plugins({})
            self.assertEqual(len(plugins), len(existing_ns_plugins) + 1, plugins)
            self.assertEqual(
                {"pelican.plugins.ns_plugin"} | get_plugin_names(existing_ns_plugins),
                get_plugin_names(plugins),
            )

            # disable namespace plugins with `PLUGINS = []`
            SETTINGS = {"PLUGINS": []}
            plugins = load_plugins(SETTINGS)
            self.assertEqual(len(plugins), 0, plugins)

@@ -107,34 +105,35 @@ class PluginTest(unittest.TestCase):
            # normal plugin
            SETTINGS = {
                "PLUGINS": ["normal_plugin"],
                "PLUGIN_PATHS": [self._NORMAL_PLUGIN_FOLDER],
            }
            plugins = load_plugins(SETTINGS)
            self.assertEqual(len(plugins), 1, plugins)
            self.assertEqual({"normal_plugin"}, get_plugin_names(plugins))

            # normal submodule/subpackage plugins
            SETTINGS = {
                "PLUGINS": [
                    "normal_submodule_plugin.subplugin",
                    "normal_submodule_plugin.subpackage.subpackage",
                ],
                "PLUGIN_PATHS": [self._NORMAL_PLUGIN_FOLDER],
            }
            plugins = load_plugins(SETTINGS)
            self.assertEqual(len(plugins), 2, plugins)
            self.assertEqual(
                {
                    "normal_submodule_plugin.subplugin",
                    "normal_submodule_plugin.subpackage.subpackage",
                },
                get_plugin_names(plugins),
            )

            # ensure normal plugins are loaded only once
            SETTINGS = {
                "PLUGINS": ["normal_plugin"],
                "PLUGIN_PATHS": [self._NORMAL_PLUGIN_FOLDER],
            }
            plugins = load_plugins(SETTINGS)
            for plugin in load_plugins(SETTINGS):

@@ -143,40 +142,33 @@ class PluginTest(unittest.TestCase):
                self.assertIn(plugin, plugins)

            # namespace plugin short
            SETTINGS = {"PLUGINS": ["ns_plugin"]}
            plugins = load_plugins(SETTINGS)
            self.assertEqual(len(plugins), 1, plugins)
            self.assertEqual({"pelican.plugins.ns_plugin"}, get_plugin_names(plugins))

            # namespace plugin long
            SETTINGS = {"PLUGINS": ["pelican.plugins.ns_plugin"]}
            plugins = load_plugins(SETTINGS)
            self.assertEqual(len(plugins), 1, plugins)
            self.assertEqual({"pelican.plugins.ns_plugin"}, get_plugin_names(plugins))

            # normal and namespace plugin
            SETTINGS = {
                "PLUGINS": ["normal_plugin", "ns_plugin"],
                "PLUGIN_PATHS": [self._NORMAL_PLUGIN_FOLDER],
            }
            plugins = load_plugins(SETTINGS)
            self.assertEqual(len(plugins), 2, plugins)
            self.assertEqual(
                {"normal_plugin", "pelican.plugins.ns_plugin"},
                get_plugin_names(plugins),
            )

    def test_get_plugin_name(self):
        self.assertEqual(
            get_plugin_name(normal_plugin),
            "pelican.tests.dummy_plugins.normal_plugin.normal_plugin",
        )

        class NoopPlugin:

@@ -185,7 +177,9 @@ class PluginTest(unittest.TestCase):
        self.assertEqual(
            get_plugin_name(NoopPlugin),
            "PluginTest.test_get_plugin_name.<locals>.NoopPlugin",
        )
        self.assertEqual(
            get_plugin_name(NoopPlugin()),
            "PluginTest.test_get_plugin_name.<locals>.NoopPlugin",
        )
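The cases above map directly onto the two ways plugins are declared in a pelicanconf.py: namespace plugins can be named in short or fully qualified form, while legacy plugins need PLUGIN_PATHS to be found. A sketch of the corresponding settings, using the dummy plugin names from these tests:

# pelicanconf.py sketch; plugin names are the dummy ones from the tests above.
PLUGINS = [
    "ns_plugin",      # namespace plugin, short form
    # "pelican.plugins.ns_plugin" would name the same plugin, fully qualified
    "normal_plugin",  # legacy plugin, resolved via PLUGIN_PATHS
]
PLUGIN_PATHS = ["plugins"]  # placeholder directory holding legacy plugins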

File diff suppressed because it is too large


@@ -6,11 +6,11 @@ from pelican.tests.support import unittest
class Test_abbr_role(unittest.TestCase):
    def call_it(self, text):
        from pelican.rstdirectives import abbr_role

        rawtext = text
        lineno = 42
        inliner = Mock(name="inliner")
        nodes, system_messages = abbr_role("abbr", rawtext, text, lineno, inliner)
        self.assertEqual(system_messages, [])
        self.assertEqual(len(nodes), 1)
        return nodes[0]

@@ -18,14 +18,14 @@ class Test_abbr_role(unittest.TestCase):
    def test(self):
        node = self.call_it("Abbr (Abbreviation)")
        self.assertEqual(node.astext(), "Abbr")
        self.assertEqual(node["explanation"], "Abbreviation")

    def test_newlines_in_explanation(self):
        node = self.call_it("CUL (See you\nlater)")
        self.assertEqual(node.astext(), "CUL")
        self.assertEqual(node["explanation"], "See you\nlater")

    def test_newlines_in_abbr(self):
        node = self.call_it("US of\nA \n (USA)")
        self.assertEqual(node.astext(), "US of\nA")
        self.assertEqual(node["explanation"], "USA")
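The role under test maps text like "Abbr (Abbreviation)" to an abbreviation plus explanation, tolerating newlines in both parts. An illustrative regex that reproduces the three expectations above; this is not necessarily the exact pattern Pelican uses:

import re

# Illustrative split of "ABBR (explanation)"; newlines may appear in either part.
_ABBR_RE = re.compile(r"^(?P<abbr>.*?)\s*\((?P<expl>[^)]*)\)\s*$", re.DOTALL)

for text in ["Abbr (Abbreviation)", "CUL (See you\nlater)", "US of\nA \n (USA)"]:
    m = _ABBR_RE.match(text)
    print(repr(m.group("abbr")), "->", repr(m.group("expl")))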


@@ -17,10 +17,9 @@ class MockServer:
class TestServer(unittest.TestCase):
    def setUp(self):
        self.server = MockServer()
        self.temp_output = mkdtemp(prefix="pelicantests.")
        self.old_cwd = os.getcwd()
        os.chdir(self.temp_output)

@@ -29,32 +28,33 @@ class TestServer(unittest.TestCase):
        rmtree(self.temp_output)

    def test_get_path_that_exists(self):
        handler = ComplexHTTPRequestHandler(
            MockRequest(), ("0.0.0.0", 8888), self.server
        )
        handler.base_path = self.temp_output

        open(os.path.join(self.temp_output, "foo.html"), "a").close()
        os.mkdir(os.path.join(self.temp_output, "foo"))
        open(os.path.join(self.temp_output, "foo", "index.html"), "a").close()
        os.mkdir(os.path.join(self.temp_output, "bar"))
        open(os.path.join(self.temp_output, "bar", "index.html"), "a").close()
        os.mkdir(os.path.join(self.temp_output, "baz"))

        for suffix in ["", "/"]:
            # foo.html has precedence over foo/index.html
            path = handler.get_path_that_exists("foo" + suffix)
            self.assertEqual(path, "foo.html")
            # folder with index.html should return folder/index.html
            path = handler.get_path_that_exists("bar" + suffix)
            self.assertEqual(path, "bar/index.html")
            # folder without index.html should return same as input
            path = handler.get_path_that_exists("baz" + suffix)
            self.assertEqual(path, "baz" + suffix)
            # not existing path should return None
            path = handler.get_path_that_exists("quux" + suffix)
            self.assertIsNone(path)
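The loop above pins down the lookup order the dev server's handler uses: an exact "foo.html" beats "foo/index.html", a directory falls back to its index.html, a bare directory is echoed back, and a missing path yields None. A standalone sketch of that precedence, as an illustration rather than the handler's real code:

import os

def resolve(base, path):
    """Illustrative lookup order matching the assertions above."""
    stripped = path.rstrip("/")
    if os.path.isfile(os.path.join(base, stripped + ".html")):
        return stripped + ".html"  # exact page wins over a directory index
    if os.path.isfile(os.path.join(base, stripped, "index.html")):
        return stripped + "/index.html"
    if os.path.isdir(os.path.join(base, stripped)):
        return path  # directory without index.html: echo the input back
    return None  # nothing matched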


@@ -4,10 +4,14 @@ import os
from os.path import abspath, dirname, join

from pelican.settings import (
    DEFAULT_CONFIG,
    DEFAULT_THEME,
    _printf_s_to_format_field,
    configure_settings,
    handle_deprecated_settings,
    read_settings,
)
from pelican.tests.support import unittest

@@ -16,40 +20,39 @@ class TestSettingsConfiguration(unittest.TestCase):
    append new values to the settings (if any), and apply basic settings
    optimizations.
    """

    def setUp(self):
        self.old_locale = locale.setlocale(locale.LC_ALL)
        locale.setlocale(locale.LC_ALL, "C")
        self.PATH = abspath(dirname(__file__))
        default_conf = join(self.PATH, "default_conf.py")
        self.settings = read_settings(default_conf)

    def tearDown(self):
        locale.setlocale(locale.LC_ALL, self.old_locale)

    def test_overwrite_existing_settings(self):
        self.assertEqual(self.settings.get("SITENAME"), "Alexis' log")
        self.assertEqual(self.settings.get("SITEURL"), "http://blog.notmyidea.org")

    def test_keep_default_settings(self):
        # Keep default settings if not defined.
        self.assertEqual(
            self.settings.get("DEFAULT_CATEGORY"), DEFAULT_CONFIG["DEFAULT_CATEGORY"]
        )

    def test_dont_copy_small_keys(self):
        # Do not copy keys not in caps.
        self.assertNotIn("foobar", self.settings)

    def test_read_empty_settings(self):
        # Ensure an empty settings file results in default settings.
        settings = read_settings(None)
        expected = copy.deepcopy(DEFAULT_CONFIG)
        # Added by configure settings
        expected["FEED_DOMAIN"] = ""
        expected["ARTICLE_EXCLUDES"] = ["pages"]
        expected["PAGE_EXCLUDES"] = [""]
        self.maxDiff = None
        self.assertDictEqual(settings, expected)
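test_read_empty_settings above documents a useful property: with no settings file at all, read_settings(None) returns the defaults plus a few keys computed by configure_settings. A quick, illustrative way to surface those computed additions interactively:

# Illustrative: list keys read_settings(None) adds on top of DEFAULT_CONFIG.
from pelican.settings import DEFAULT_CONFIG, read_settings

settings = read_settings(None)
extras = {k: settings[k] for k in settings.keys() - DEFAULT_CONFIG.keys()}
print(extras)  # per the test above, expect computed keys such as FEED_DOMAIN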
@@ -57,250 +60,265 @@ class TestSettingsConfiguration(unittest.TestCase):
        # Make sure that the results from one settings call don't
        # affect past or future instances.
        self.PATH = abspath(dirname(__file__))
        default_conf = join(self.PATH, "default_conf.py")
        settings = read_settings(default_conf)
        settings["SITEURL"] = "new-value"
        new_settings = read_settings(default_conf)
        self.assertNotEqual(new_settings["SITEURL"], settings["SITEURL"])

    def test_defaults_not_overwritten(self):
        # This assumes 'SITENAME': 'A Pelican Blog'
        settings = read_settings(None)
        settings["SITENAME"] = "Not a Pelican Blog"
        self.assertNotEqual(settings["SITENAME"], DEFAULT_CONFIG["SITENAME"])

    def test_static_path_settings_safety(self):
        # Disallow static paths from being strings
        settings = {
            "STATIC_PATHS": "foo/bar",
            "THEME_STATIC_PATHS": "bar/baz",
            # These 4 settings are required to run configure_settings
            "PATH": ".",
            "THEME": DEFAULT_THEME,
            "SITEURL": "http://blog.notmyidea.org/",
            "LOCALE": "",
        }
        configure_settings(settings)
        self.assertEqual(settings["STATIC_PATHS"], DEFAULT_CONFIG["STATIC_PATHS"])
        self.assertEqual(
            settings["THEME_STATIC_PATHS"], DEFAULT_CONFIG["THEME_STATIC_PATHS"]
        )

    def test_configure_settings(self):
        # Manipulations to settings should be applied correctly.
        settings = {
            "SITEURL": "http://blog.notmyidea.org/",
            "LOCALE": "",
            "PATH": os.curdir,
            "THEME": DEFAULT_THEME,
        }
        configure_settings(settings)

        # SITEURL should not have a trailing slash
        self.assertEqual(settings["SITEURL"], "http://blog.notmyidea.org")

        # FEED_DOMAIN, if undefined, should default to SITEURL
        self.assertEqual(settings["FEED_DOMAIN"], "http://blog.notmyidea.org")

        settings["FEED_DOMAIN"] = "http://feeds.example.com"
        configure_settings(settings)
        self.assertEqual(settings["FEED_DOMAIN"], "http://feeds.example.com")

    def test_theme_settings_exceptions(self):
        settings = self.settings

        # Check that theme lookup in "pelican/themes" functions as expected
        settings["THEME"] = os.path.split(settings["THEME"])[1]
        configure_settings(settings)
        self.assertEqual(settings["THEME"], DEFAULT_THEME)

        # Check that non-existent theme raises exception
        settings["THEME"] = "foo"
        self.assertRaises(Exception, configure_settings, settings)

    def test_deprecated_dir_setting(self):
        settings = self.settings

        settings["ARTICLE_DIR"] = "foo"
        settings["PAGE_DIR"] = "bar"

        settings = handle_deprecated_settings(settings)

        self.assertEqual(settings["ARTICLE_PATHS"], ["foo"])
        self.assertEqual(settings["PAGE_PATHS"], ["bar"])

        with self.assertRaises(KeyError):
            settings["ARTICLE_DIR"]
            settings["PAGE_DIR"]

    def test_default_encoding(self):
        # Test that the user locale is set if not specified in settings
        locale.setlocale(locale.LC_ALL, "C")
        # empty string = user system locale
        self.assertEqual(self.settings["LOCALE"], [""])

        configure_settings(self.settings)
        lc_time = locale.getlocale(locale.LC_TIME)  # should be set to user locale

        # explicitly set locale to user pref and test
        locale.setlocale(locale.LC_TIME, "")
        self.assertEqual(lc_time, locale.getlocale(locale.LC_TIME))

    def test_invalid_settings_throw_exception(self):
        # Test that the path name is valid

        # test that 'PATH' is set
        settings = {}
        self.assertRaises(Exception, configure_settings, settings)

        # Test that 'PATH' is valid
        settings["PATH"] = ""
        self.assertRaises(Exception, configure_settings, settings)

        # Test nonexistent THEME
        settings["PATH"] = os.curdir
        settings["THEME"] = "foo"
        self.assertRaises(Exception, configure_settings, settings)

    def test__printf_s_to_format_field(self):
        for s in ("%s", "{%s}", "{%s"):
            option = "foo/{}/bar.baz".format(s)
            result = _printf_s_to_format_field(option, "slug")
            expected = option % "qux"
            found = result.format(slug="qux")
            self.assertEqual(expected, found)

    def test_deprecated_extra_templates_paths(self):
        settings = self.settings
        settings["EXTRA_TEMPLATES_PATHS"] = ["/foo/bar", "/ha"]
        settings = handle_deprecated_settings(settings)
        self.assertEqual(settings["THEME_TEMPLATES_OVERRIDES"], ["/foo/bar", "/ha"])
        self.assertNotIn("EXTRA_TEMPLATES_PATHS", settings)

    def test_deprecated_paginated_direct_templates(self):
        settings = self.settings
        settings["PAGINATED_DIRECT_TEMPLATES"] = ["index", "archives"]
        settings["PAGINATED_TEMPLATES"] = {"index": 10, "category": None}
        settings = handle_deprecated_settings(settings)
        self.assertEqual(
            settings["PAGINATED_TEMPLATES"],
            {"index": 10, "category": None, "archives": None},
        )
        self.assertNotIn("PAGINATED_DIRECT_TEMPLATES", settings)

    def test_deprecated_paginated_direct_templates_from_file(self):
        # This is equivalent to reading a settings file that has
        # PAGINATED_DIRECT_TEMPLATES defined but no PAGINATED_TEMPLATES.
        settings = read_settings(
            None, override={"PAGINATED_DIRECT_TEMPLATES": ["index", "archives"]}
        )
        self.assertEqual(
            settings["PAGINATED_TEMPLATES"],
            {
                "archives": None,
                "author": None,
                "index": None,
                "category": None,
                "tag": None,
            },
        )
        self.assertNotIn("PAGINATED_DIRECT_TEMPLATES", settings)

    def test_theme_and_extra_templates_exception(self):
        settings = self.settings
        settings["EXTRA_TEMPLATES_PATHS"] = ["/ha"]
        settings["THEME_TEMPLATES_OVERRIDES"] = ["/foo/bar"]
        self.assertRaises(Exception, handle_deprecated_settings, settings)

    def test_slug_and_slug_regex_substitutions_exception(self):
        settings = {}
        settings["SLUG_REGEX_SUBSTITUTIONS"] = [("C++", "cpp")]
        settings["TAG_SUBSTITUTIONS"] = [("C#", "csharp")]
        self.assertRaises(Exception, handle_deprecated_settings, settings)

    def test_deprecated_slug_substitutions(self):
        default_slug_regex_subs = self.settings["SLUG_REGEX_SUBSTITUTIONS"]

        # If no deprecated setting is set, don't set new ones
        settings = {}
        settings = handle_deprecated_settings(settings)
        self.assertNotIn("SLUG_REGEX_SUBSTITUTIONS", settings)
        self.assertNotIn("TAG_REGEX_SUBSTITUTIONS", settings)
        self.assertNotIn("CATEGORY_REGEX_SUBSTITUTIONS", settings)
        self.assertNotIn("AUTHOR_REGEX_SUBSTITUTIONS", settings)

        # If SLUG_SUBSTITUTIONS is set, set {SLUG, AUTHOR}_REGEX_SUBSTITUTIONS
        # correctly, don't set {CATEGORY, TAG}_REGEX_SUBSTITUTIONS
        settings = {}
        settings["SLUG_SUBSTITUTIONS"] = [("C++", "cpp")]
        settings = handle_deprecated_settings(settings)
        self.assertEqual(
            settings.get("SLUG_REGEX_SUBSTITUTIONS"),
            [(r"C\+\+", "cpp")] + default_slug_regex_subs,
        )
        self.assertNotIn("TAG_REGEX_SUBSTITUTIONS", settings)
        self.assertNotIn("CATEGORY_REGEX_SUBSTITUTIONS", settings)
        self.assertEqual(
            settings.get("AUTHOR_REGEX_SUBSTITUTIONS"), default_slug_regex_subs
        )

        # If {CATEGORY, TAG, AUTHOR}_SUBSTITUTIONS are set, set
        # {CATEGORY, TAG, AUTHOR}_REGEX_SUBSTITUTIONS correctly, don't set
        # SLUG_REGEX_SUBSTITUTIONS
        settings = {}
        settings["TAG_SUBSTITUTIONS"] = [("C#", "csharp")]
        settings["CATEGORY_SUBSTITUTIONS"] = [("C#", "csharp")]
        settings["AUTHOR_SUBSTITUTIONS"] = [("Alexander Todorov", "atodorov")]
        settings = handle_deprecated_settings(settings)
        self.assertNotIn("SLUG_REGEX_SUBSTITUTIONS", settings)
        self.assertEqual(
            settings["TAG_REGEX_SUBSTITUTIONS"],
            [(r"C\#", "csharp")] + default_slug_regex_subs,
        )
        self.assertEqual(
            settings["CATEGORY_REGEX_SUBSTITUTIONS"],
            [(r"C\#", "csharp")] + default_slug_regex_subs,
        )
        self.assertEqual(
            settings["AUTHOR_REGEX_SUBSTITUTIONS"],
            [(r"Alexander\ Todorov", "atodorov")] + default_slug_regex_subs,
        )

        # If {SLUG, CATEGORY, TAG, AUTHOR}_SUBSTITUTIONS are set, set
        # {SLUG, CATEGORY, TAG, AUTHOR}_REGEX_SUBSTITUTIONS correctly
settings = {} settings = {}
settings['SLUG_SUBSTITUTIONS'] = [('C++', 'cpp')] settings["SLUG_SUBSTITUTIONS"] = [("C++", "cpp")]
settings['TAG_SUBSTITUTIONS'] = [('C#', 'csharp')] settings["TAG_SUBSTITUTIONS"] = [("C#", "csharp")]
settings['CATEGORY_SUBSTITUTIONS'] = [('C#', 'csharp')] settings["CATEGORY_SUBSTITUTIONS"] = [("C#", "csharp")]
settings['AUTHOR_SUBSTITUTIONS'] = [('Alexander Todorov', 'atodorov')] settings["AUTHOR_SUBSTITUTIONS"] = [("Alexander Todorov", "atodorov")]
settings = handle_deprecated_settings(settings) settings = handle_deprecated_settings(settings)
self.assertEqual(settings['TAG_REGEX_SUBSTITUTIONS'], self.assertEqual(
[(r'C\+\+', 'cpp')] + [(r'C\#', 'csharp')] + settings["TAG_REGEX_SUBSTITUTIONS"],
default_slug_regex_subs) [(r"C\+\+", "cpp")] + [(r"C\#", "csharp")] + default_slug_regex_subs,
self.assertEqual(settings['CATEGORY_REGEX_SUBSTITUTIONS'], )
[(r'C\+\+', 'cpp')] + [(r'C\#', 'csharp')] + self.assertEqual(
default_slug_regex_subs) settings["CATEGORY_REGEX_SUBSTITUTIONS"],
self.assertEqual(settings['AUTHOR_REGEX_SUBSTITUTIONS'], [(r"C\+\+", "cpp")] + [(r"C\#", "csharp")] + default_slug_regex_subs,
[(r'Alexander\ Todorov', 'atodorov')] + )
default_slug_regex_subs) self.assertEqual(
settings["AUTHOR_REGEX_SUBSTITUTIONS"],
[(r"Alexander\ Todorov", "atodorov")] + default_slug_regex_subs,
)
# Handle old 'skip' flags correctly # Handle old 'skip' flags correctly
settings = {} settings = {}
settings['SLUG_SUBSTITUTIONS'] = [('C++', 'cpp', True)] settings["SLUG_SUBSTITUTIONS"] = [("C++", "cpp", True)]
settings['AUTHOR_SUBSTITUTIONS'] = [('Alexander Todorov', 'atodorov', settings["AUTHOR_SUBSTITUTIONS"] = [("Alexander Todorov", "atodorov", False)]
False)]
settings = handle_deprecated_settings(settings) settings = handle_deprecated_settings(settings)
self.assertEqual(settings.get('SLUG_REGEX_SUBSTITUTIONS'), self.assertEqual(
[(r'C\+\+', 'cpp')] + settings.get("SLUG_REGEX_SUBSTITUTIONS"),
[(r'(?u)\A\s*', ''), (r'(?u)\s*\Z', '')]) [(r"C\+\+", "cpp")] + [(r"(?u)\A\s*", ""), (r"(?u)\s*\Z", "")],
self.assertEqual(settings['AUTHOR_REGEX_SUBSTITUTIONS'], )
[(r'Alexander\ Todorov', 'atodorov')] + self.assertEqual(
default_slug_regex_subs) settings["AUTHOR_REGEX_SUBSTITUTIONS"],
[(r"Alexander\ Todorov", "atodorov")] + default_slug_regex_subs,
)
def test_deprecated_slug_substitutions_from_file(self): def test_deprecated_slug_substitutions_from_file(self):
# This is equivalent to reading a settings file that has # This is equivalent to reading a settings file that has
# SLUG_SUBSTITUTIONS defined but no SLUG_REGEX_SUBSTITUTIONS. # SLUG_SUBSTITUTIONS defined but no SLUG_REGEX_SUBSTITUTIONS.
settings = read_settings(None, override={ settings = read_settings(
'SLUG_SUBSTITUTIONS': [('C++', 'cpp')] None, override={"SLUG_SUBSTITUTIONS": [("C++", "cpp")]}
}) )
self.assertEqual(settings['SLUG_REGEX_SUBSTITUTIONS'], self.assertEqual(
[(r'C\+\+', 'cpp')] + settings["SLUG_REGEX_SUBSTITUTIONS"],
self.settings['SLUG_REGEX_SUBSTITUTIONS']) [(r"C\+\+", "cpp")] + self.settings["SLUG_REGEX_SUBSTITUTIONS"],
self.assertNotIn('SLUG_SUBSTITUTIONS', settings) )
self.assertNotIn("SLUG_SUBSTITUTIONS", settings)

View file

@@ -4,7 +4,6 @@ from pelican.tests.support import unittest
class TestSuiteTest(unittest.TestCase): class TestSuiteTest(unittest.TestCase):
def test_error_on_warning(self): def test_error_on_warning(self):
with self.assertRaises(UserWarning): with self.assertRaises(UserWarning):
warnings.warn('test warning') warnings.warn("test warning")

View file

@@ -5,22 +5,22 @@ from pelican.urlwrappers import Author, Category, Tag, URLWrapper
class TestURLWrapper(unittest.TestCase): class TestURLWrapper(unittest.TestCase):
def test_ordering(self): def test_ordering(self):
# URLWrappers are sorted by name # URLWrappers are sorted by name
wrapper_a = URLWrapper(name='first', settings={}) wrapper_a = URLWrapper(name="first", settings={})
wrapper_b = URLWrapper(name='last', settings={}) wrapper_b = URLWrapper(name="last", settings={})
self.assertFalse(wrapper_a > wrapper_b) self.assertFalse(wrapper_a > wrapper_b)
self.assertFalse(wrapper_a >= wrapper_b) self.assertFalse(wrapper_a >= wrapper_b)
self.assertFalse(wrapper_a == wrapper_b) self.assertFalse(wrapper_a == wrapper_b)
self.assertTrue(wrapper_a != wrapper_b) self.assertTrue(wrapper_a != wrapper_b)
self.assertTrue(wrapper_a <= wrapper_b) self.assertTrue(wrapper_a <= wrapper_b)
self.assertTrue(wrapper_a < wrapper_b) self.assertTrue(wrapper_a < wrapper_b)
wrapper_b.name = 'first' wrapper_b.name = "first"
self.assertFalse(wrapper_a > wrapper_b) self.assertFalse(wrapper_a > wrapper_b)
self.assertTrue(wrapper_a >= wrapper_b) self.assertTrue(wrapper_a >= wrapper_b)
self.assertTrue(wrapper_a == wrapper_b) self.assertTrue(wrapper_a == wrapper_b)
self.assertFalse(wrapper_a != wrapper_b) self.assertFalse(wrapper_a != wrapper_b)
self.assertTrue(wrapper_a <= wrapper_b) self.assertTrue(wrapper_a <= wrapper_b)
self.assertFalse(wrapper_a < wrapper_b) self.assertFalse(wrapper_a < wrapper_b)
wrapper_a.name = 'last' wrapper_a.name = "last"
self.assertTrue(wrapper_a > wrapper_b) self.assertTrue(wrapper_a > wrapper_b)
self.assertTrue(wrapper_a >= wrapper_b) self.assertTrue(wrapper_a >= wrapper_b)
self.assertFalse(wrapper_a == wrapper_b) self.assertFalse(wrapper_a == wrapper_b)
@@ -29,57 +29,68 @@ class TestURLWrapper(unittest.TestCase):
self.assertFalse(wrapper_a < wrapper_b) self.assertFalse(wrapper_a < wrapper_b)
def test_equality(self): def test_equality(self):
tag = Tag('test', settings={}) tag = Tag("test", settings={})
cat = Category('test', settings={}) cat = Category("test", settings={})
author = Author('test', settings={}) author = Author("test", settings={})
# same name, but different class # same name, but different class
self.assertNotEqual(tag, cat) self.assertNotEqual(tag, cat)
self.assertNotEqual(tag, author) self.assertNotEqual(tag, author)
# should be equal vs text representing the same name # should be equal vs text representing the same name
self.assertEqual(tag, 'test') self.assertEqual(tag, "test")
# should not be equal vs binary # should not be equal vs binary
self.assertNotEqual(tag, b'test') self.assertNotEqual(tag, b"test")
# Tags describing the same thing should be equal # Tags describing the same thing should be equal
tag_equal = Tag('Test', settings={}) tag_equal = Tag("Test", settings={})
self.assertEqual(tag, tag_equal) self.assertEqual(tag, tag_equal)
# Authors describing the same person should be equal # Authors describing the same person should be equal
author_equal = Author('Test', settings={}) author_equal = Author("Test", settings={})
self.assertEqual(author, author_equal) self.assertEqual(author, author_equal)
cat_ascii = Category('指導書', settings={}) cat_ascii = Category("指導書", settings={})
self.assertEqual(cat_ascii, 'zhi dao shu') self.assertEqual(cat_ascii, "zhi dao shu")
def test_slugify_with_substitutions_and_dots(self): def test_slugify_with_substitutions_and_dots(self):
tag = Tag('Tag Dot', settings={'TAG_REGEX_SUBSTITUTIONS': [ tag = Tag(
('Tag Dot', 'tag.dot'), "Tag Dot",
]}) settings={
cat = Category('Category Dot', "TAG_REGEX_SUBSTITUTIONS": [
settings={'CATEGORY_REGEX_SUBSTITUTIONS': [ ("Tag Dot", "tag.dot"),
('Category Dot', 'cat.dot'), ]
]}) },
)
cat = Category(
"Category Dot",
settings={
"CATEGORY_REGEX_SUBSTITUTIONS": [
("Category Dot", "cat.dot"),
]
},
)
self.assertEqual(tag.slug, 'tag.dot') self.assertEqual(tag.slug, "tag.dot")
self.assertEqual(cat.slug, 'cat.dot') self.assertEqual(cat.slug, "cat.dot")
def test_author_slug_substitutions(self): def test_author_slug_substitutions(self):
settings = {'AUTHOR_REGEX_SUBSTITUTIONS': [ settings = {
('Alexander Todorov', 'atodorov'), "AUTHOR_REGEX_SUBSTITUTIONS": [
('Krasimir Tsonev', 'krasimir'), ("Alexander Todorov", "atodorov"),
(r'[^\w\s-]', ''), ("Krasimir Tsonev", "krasimir"),
(r'(?u)\A\s*', ''), (r"[^\w\s-]", ""),
(r'(?u)\s*\Z', ''), (r"(?u)\A\s*", ""),
(r'[-\s]+', '-'), (r"(?u)\s*\Z", ""),
]} (r"[-\s]+", "-"),
]
}
author1 = Author('Mr. Senko', settings=settings) author1 = Author("Mr. Senko", settings=settings)
author2 = Author('Alexander Todorov', settings=settings) author2 = Author("Alexander Todorov", settings=settings)
author3 = Author('Krasimir Tsonev', settings=settings) author3 = Author("Krasimir Tsonev", settings=settings)
self.assertEqual(author1.slug, 'mr-senko') self.assertEqual(author1.slug, "mr-senko")
self.assertEqual(author2.slug, 'atodorov') self.assertEqual(author2.slug, "atodorov")
self.assertEqual(author3.slug, 'krasimir') self.assertEqual(author3.slug, "krasimir")

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -19,6 +19,7 @@ except ImportError:
try: try:
import tzlocal import tzlocal
if hasattr(tzlocal.get_localzone(), "zone"): if hasattr(tzlocal.get_localzone(), "zone"):
_DEFAULT_TIMEZONE = tzlocal.get_localzone().zone _DEFAULT_TIMEZONE = tzlocal.get_localzone().zone
else: else:
@@ -28,55 +29,51 @@ except ModuleNotFoundError:
from pelican import __version__ from pelican import __version__
locale.setlocale(locale.LC_ALL, '') locale.setlocale(locale.LC_ALL, "")
try: try:
_DEFAULT_LANGUAGE = locale.getlocale()[0] _DEFAULT_LANGUAGE = locale.getlocale()[0]
except ValueError: except ValueError:
# Don't fail on macosx: "unknown locale: UTF-8" # Don't fail on macosx: "unknown locale: UTF-8"
_DEFAULT_LANGUAGE = None _DEFAULT_LANGUAGE = None
if _DEFAULT_LANGUAGE is None: if _DEFAULT_LANGUAGE is None:
_DEFAULT_LANGUAGE = 'en' _DEFAULT_LANGUAGE = "en"
else: else:
_DEFAULT_LANGUAGE = _DEFAULT_LANGUAGE.split('_')[0] _DEFAULT_LANGUAGE = _DEFAULT_LANGUAGE.split("_")[0]
_TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), _TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates")
"templates")
_jinja_env = Environment( _jinja_env = Environment(
loader=FileSystemLoader(_TEMPLATES_DIR), loader=FileSystemLoader(_TEMPLATES_DIR),
trim_blocks=True, trim_blocks=True,
) )
_GITHUB_PAGES_BRANCHES = { _GITHUB_PAGES_BRANCHES = {"personal": "main", "project": "gh-pages"}
'personal': 'main',
'project': 'gh-pages'
}
CONF = { CONF = {
'pelican': 'pelican', "pelican": "pelican",
'pelicanopts': '', "pelicanopts": "",
'basedir': os.curdir, "basedir": os.curdir,
'ftp_host': 'localhost', "ftp_host": "localhost",
'ftp_user': 'anonymous', "ftp_user": "anonymous",
'ftp_target_dir': '/', "ftp_target_dir": "/",
'ssh_host': 'localhost', "ssh_host": "localhost",
'ssh_port': 22, "ssh_port": 22,
'ssh_user': 'root', "ssh_user": "root",
'ssh_target_dir': '/var/www', "ssh_target_dir": "/var/www",
's3_bucket': 'my_s3_bucket', "s3_bucket": "my_s3_bucket",
'cloudfiles_username': 'my_rackspace_username', "cloudfiles_username": "my_rackspace_username",
'cloudfiles_api_key': 'my_rackspace_api_key', "cloudfiles_api_key": "my_rackspace_api_key",
'cloudfiles_container': 'my_cloudfiles_container', "cloudfiles_container": "my_cloudfiles_container",
'dropbox_dir': '~/Dropbox/Public/', "dropbox_dir": "~/Dropbox/Public/",
'github_pages_branch': _GITHUB_PAGES_BRANCHES['project'], "github_pages_branch": _GITHUB_PAGES_BRANCHES["project"],
'default_pagination': 10, "default_pagination": 10,
'siteurl': '', "siteurl": "",
'lang': _DEFAULT_LANGUAGE, "lang": _DEFAULT_LANGUAGE,
'timezone': _DEFAULT_TIMEZONE "timezone": _DEFAULT_TIMEZONE,
} }
# url for list of valid timezones # url for list of valid timezones
_TZ_URL = 'https://en.wikipedia.org/wiki/List_of_tz_database_time_zones' _TZ_URL = "https://en.wikipedia.org/wiki/List_of_tz_database_time_zones"
# Create a 'marked' default path, to determine if someone has supplied # Create a 'marked' default path, to determine if someone has supplied
@@ -90,12 +87,12 @@ _DEFAULT_PATH = _DEFAULT_PATH_TYPE(os.curdir)
def ask(question, answer=str, default=None, length=None): def ask(question, answer=str, default=None, length=None):
if answer == str: if answer == str:
r = '' r = ""
while True: while True:
if default: if default:
r = input('> {} [{}] '.format(question, default)) r = input("> {} [{}] ".format(question, default))
else: else:
r = input('> {} '.format(question)) r = input("> {} ".format(question))
r = r.strip() r = r.strip()
@@ -104,10 +101,10 @@ def ask(question, answer=str, default=None, length=None):
r = default r = default
break break
else: else:
print('You must enter something') print("You must enter something")
else: else:
if length and len(r) != length: if length and len(r) != length:
print('Entry must be {} characters long'.format(length)) print("Entry must be {} characters long".format(length))
else: else:
break break
@@ -117,18 +114,18 @@ def ask(question, answer=str, default=None, length=None):
r = None r = None
while True: while True:
if default is True: if default is True:
r = input('> {} (Y/n) '.format(question)) r = input("> {} (Y/n) ".format(question))
elif default is False: elif default is False:
r = input('> {} (y/N) '.format(question)) r = input("> {} (y/N) ".format(question))
else: else:
r = input('> {} (y/n) '.format(question)) r = input("> {} (y/n) ".format(question))
r = r.strip().lower() r = r.strip().lower()
if r in ('y', 'yes'): if r in ("y", "yes"):
r = True r = True
break break
elif r in ('n', 'no'): elif r in ("n", "no"):
r = False r = False
break break
elif not r: elif not r:
@@ -141,9 +138,9 @@ def ask(question, answer=str, default=None, length=None):
r = None r = None
while True: while True:
if default: if default:
r = input('> {} [{}] '.format(question, default)) r = input("> {} [{}] ".format(question, default))
else: else:
r = input('> {} '.format(question)) r = input("> {} ".format(question))
r = r.strip() r = r.strip()
@@ -155,11 +152,10 @@ def ask(question, answer=str, default=None, length=None):
r = int(r) r = int(r)
break break
except ValueError: except ValueError:
print('You must enter an integer') print("You must enter an integer")
return r return r
else: else:
raise NotImplementedError( raise NotImplementedError("Argument `answer` must be str, bool, or integer")
'Argument `answer` must be str, bool, or integer')
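Because ask() loops on input(), its retry behavior is easiest to see by stubbing stdin; a minimal sketch, assuming ask is importable from pelican.tools.pelican_quickstart:

import builtins
from unittest import mock

from pelican.tools.pelican_quickstart import ask

# The int branch rejects a non-numeric entry, then accepts the retry.
with mock.patch.object(builtins, "input", side_effect=["twenty", "20"]):
    assert ask("How many articles per page do you want?", int, 10) == 20

# The bool branch accepts y/yes and n/no, case-insensitively.
with mock.patch.object(builtins, "input", side_effect=["YES"]):
    assert ask("Enable pagination?", bool, True) is True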
def ask_timezone(question, default, tzurl): def ask_timezone(question, default, tzurl):
@@ -178,162 +174,227 @@ def ask_timezone(question, default, tzurl):
def render_jinja_template(tmpl_name: str, tmpl_vars: Mapping, target_path: str): def render_jinja_template(tmpl_name: str, tmpl_vars: Mapping, target_path: str):
try: try:
with open(os.path.join(CONF['basedir'], target_path), with open(
'w', encoding='utf-8') as fd: os.path.join(CONF["basedir"], target_path), "w", encoding="utf-8"
) as fd:
_template = _jinja_env.get_template(tmpl_name) _template = _jinja_env.get_template(tmpl_name)
fd.write(_template.render(**tmpl_vars)) fd.write(_template.render(**tmpl_vars))
except OSError as e: except OSError as e:
print('Error: {}'.format(e)) print("Error: {}".format(e))
def main(): def main():
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description="A kickstarter for Pelican", description="A kickstarter for Pelican",
formatter_class=argparse.ArgumentDefaultsHelpFormatter) formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parser.add_argument('-p', '--path', default=_DEFAULT_PATH, )
help="The path to generate the blog into") parser.add_argument(
parser.add_argument('-t', '--title', metavar="title", "-p", "--path", default=_DEFAULT_PATH, help="The path to generate the blog into"
help='Set the title of the website') )
parser.add_argument('-a', '--author', metavar="author", parser.add_argument(
help='Set the author name of the website') "-t", "--title", metavar="title", help="Set the title of the website"
parser.add_argument('-l', '--lang', metavar="lang", )
help='Set the default web site language') parser.add_argument(
"-a", "--author", metavar="author", help="Set the author name of the website"
)
parser.add_argument(
"-l", "--lang", metavar="lang", help="Set the default web site language"
)
args = parser.parse_args() args = parser.parse_args()
print('''Welcome to pelican-quickstart v{v}. print(
"""Welcome to pelican-quickstart v{v}.
This script will help you create a new Pelican-based website. This script will help you create a new Pelican-based website.
Please answer the following questions so this script can generate the files Please answer the following questions so this script can generate the files
needed by Pelican. needed by Pelican.
'''.format(v=__version__)) """.format(v=__version__)
)
project = os.path.join( project = os.path.join(os.environ.get("VIRTUAL_ENV", os.curdir), ".project")
os.environ.get('VIRTUAL_ENV', os.curdir), '.project') no_path_was_specified = hasattr(args.path, "is_default_path")
no_path_was_specified = hasattr(args.path, 'is_default_path')
if os.path.isfile(project) and no_path_was_specified: if os.path.isfile(project) and no_path_was_specified:
CONF['basedir'] = open(project).read().rstrip("\n") CONF["basedir"] = open(project).read().rstrip("\n")
print('Using project associated with current virtual environment. ' print(
'Will save to:\n%s\n' % CONF['basedir']) "Using project associated with current virtual environment. "
"Will save to:\n%s\n" % CONF["basedir"]
)
else: else:
CONF['basedir'] = os.path.abspath(os.path.expanduser( CONF["basedir"] = os.path.abspath(
ask('Where do you want to create your new web site?', os.path.expanduser(
answer=str, default=args.path))) ask(
"Where do you want to create your new web site?",
answer=str,
default=args.path,
)
)
)
CONF['sitename'] = ask('What will be the title of this web site?', CONF["sitename"] = ask(
answer=str, default=args.title) "What will be the title of this web site?", answer=str, default=args.title
CONF['author'] = ask('Who will be the author of this web site?', )
answer=str, default=args.author) CONF["author"] = ask(
CONF['lang'] = ask('What will be the default language of this web site?', "Who will be the author of this web site?", answer=str, default=args.author
str, args.lang or CONF['lang'], 2) )
CONF["lang"] = ask(
"What will be the default language of this web site?",
str,
args.lang or CONF["lang"],
2,
)
if ask('Do you want to specify a URL prefix? e.g., https://example.com ', if ask(
answer=bool, default=True): "Do you want to specify a URL prefix? e.g., https://example.com ",
CONF['siteurl'] = ask('What is your URL prefix? (see ' answer=bool,
'above example; no trailing slash)', default=True,
str, CONF['siteurl']) ):
CONF["siteurl"] = ask(
"What is your URL prefix? (see " "above example; no trailing slash)",
str,
CONF["siteurl"],
)
CONF['with_pagination'] = ask('Do you want to enable article pagination?', CONF["with_pagination"] = ask(
bool, bool(CONF['default_pagination'])) "Do you want to enable article pagination?",
bool,
bool(CONF["default_pagination"]),
)
if CONF['with_pagination']: if CONF["with_pagination"]:
CONF['default_pagination'] = ask('How many articles per page ' CONF["default_pagination"] = ask(
'do you want?', "How many articles per page " "do you want?",
int, CONF['default_pagination']) int,
CONF["default_pagination"],
)
else: else:
CONF['default_pagination'] = False CONF["default_pagination"] = False
CONF['timezone'] = ask_timezone('What is your time zone?', CONF["timezone"] = ask_timezone(
CONF['timezone'], _TZ_URL) "What is your time zone?", CONF["timezone"], _TZ_URL
)
automation = ask('Do you want to generate a tasks.py/Makefile ' automation = ask(
'to automate generation and publishing?', bool, True) "Do you want to generate a tasks.py/Makefile "
"to automate generation and publishing?",
bool,
True,
)
if automation: if automation:
if ask('Do you want to upload your website using FTP?', if ask(
answer=bool, default=False): "Do you want to upload your website using FTP?", answer=bool, default=False
CONF['ftp'] = True, ):
CONF['ftp_host'] = ask('What is the hostname of your FTP server?', CONF["ftp"] = (True,)
str, CONF['ftp_host']) CONF["ftp_host"] = ask(
CONF['ftp_user'] = ask('What is your username on that server?', "What is the hostname of your FTP server?", str, CONF["ftp_host"]
str, CONF['ftp_user']) )
CONF['ftp_target_dir'] = ask('Where do you want to put your ' CONF["ftp_user"] = ask(
'web site on that server?', "What is your username on that server?", str, CONF["ftp_user"]
str, CONF['ftp_target_dir']) )
if ask('Do you want to upload your website using SSH?', CONF["ftp_target_dir"] = ask(
answer=bool, default=False): "Where do you want to put your " "web site on that server?",
CONF['ssh'] = True, str,
CONF['ssh_host'] = ask('What is the hostname of your SSH server?', CONF["ftp_target_dir"],
str, CONF['ssh_host']) )
CONF['ssh_port'] = ask('What is the port of your SSH server?', if ask(
int, CONF['ssh_port']) "Do you want to upload your website using SSH?", answer=bool, default=False
CONF['ssh_user'] = ask('What is your username on that server?', ):
str, CONF['ssh_user']) CONF["ssh"] = (True,)
CONF['ssh_target_dir'] = ask('Where do you want to put your ' CONF["ssh_host"] = ask(
'web site on that server?', "What is the hostname of your SSH server?", str, CONF["ssh_host"]
str, CONF['ssh_target_dir']) )
CONF["ssh_port"] = ask(
"What is the port of your SSH server?", int, CONF["ssh_port"]
)
CONF["ssh_user"] = ask(
"What is your username on that server?", str, CONF["ssh_user"]
)
CONF["ssh_target_dir"] = ask(
"Where do you want to put your " "web site on that server?",
str,
CONF["ssh_target_dir"],
)
if ask('Do you want to upload your website using Dropbox?', if ask(
answer=bool, default=False): "Do you want to upload your website using Dropbox?",
CONF['dropbox'] = True, answer=bool,
CONF['dropbox_dir'] = ask('Where is your Dropbox directory?', default=False,
str, CONF['dropbox_dir']) ):
CONF["dropbox"] = (True,)
CONF["dropbox_dir"] = ask(
"Where is your Dropbox directory?", str, CONF["dropbox_dir"]
)
if ask('Do you want to upload your website using S3?', if ask(
answer=bool, default=False): "Do you want to upload your website using S3?", answer=bool, default=False
CONF['s3'] = True, ):
CONF['s3_bucket'] = ask('What is the name of your S3 bucket?', CONF["s3"] = (True,)
str, CONF['s3_bucket']) CONF["s3_bucket"] = ask(
"What is the name of your S3 bucket?", str, CONF["s3_bucket"]
)
if ask('Do you want to upload your website using ' if ask(
'Rackspace Cloud Files?', answer=bool, default=False): "Do you want to upload your website using " "Rackspace Cloud Files?",
CONF['cloudfiles'] = True, answer=bool,
CONF['cloudfiles_username'] = ask('What is your Rackspace ' default=False,
'Cloud username?', str, ):
CONF['cloudfiles_username']) CONF["cloudfiles"] = (True,)
CONF['cloudfiles_api_key'] = ask('What is your Rackspace ' CONF["cloudfiles_username"] = ask(
'Cloud API key?', str, "What is your Rackspace " "Cloud username?",
CONF['cloudfiles_api_key']) str,
CONF['cloudfiles_container'] = ask('What is the name of your ' CONF["cloudfiles_username"],
'Cloud Files container?', )
str, CONF["cloudfiles_api_key"] = ask(
CONF['cloudfiles_container']) "What is your Rackspace " "Cloud API key?",
str,
CONF["cloudfiles_api_key"],
)
CONF["cloudfiles_container"] = ask(
"What is the name of your " "Cloud Files container?",
str,
CONF["cloudfiles_container"],
)
if ask('Do you want to upload your website using GitHub Pages?', if ask(
answer=bool, default=False): "Do you want to upload your website using GitHub Pages?",
CONF['github'] = True, answer=bool,
if ask('Is this your personal page (username.github.io)?', default=False,
answer=bool, default=False): ):
CONF['github_pages_branch'] = \ CONF["github"] = (True,)
_GITHUB_PAGES_BRANCHES['personal'] if ask(
"Is this your personal page (username.github.io)?",
answer=bool,
default=False,
):
CONF["github_pages_branch"] = _GITHUB_PAGES_BRANCHES["personal"]
else: else:
CONF['github_pages_branch'] = \ CONF["github_pages_branch"] = _GITHUB_PAGES_BRANCHES["project"]
_GITHUB_PAGES_BRANCHES['project']
try: try:
os.makedirs(os.path.join(CONF['basedir'], 'content')) os.makedirs(os.path.join(CONF["basedir"], "content"))
except OSError as e: except OSError as e:
print('Error: {}'.format(e)) print("Error: {}".format(e))
try: try:
os.makedirs(os.path.join(CONF['basedir'], 'output')) os.makedirs(os.path.join(CONF["basedir"], "output"))
except OSError as e: except OSError as e:
print('Error: {}'.format(e)) print("Error: {}".format(e))
conf_python = dict() conf_python = dict()
for key, value in CONF.items(): for key, value in CONF.items():
conf_python[key] = repr(value) conf_python[key] = repr(value)
render_jinja_template('pelicanconf.py.jinja2', conf_python, 'pelicanconf.py') render_jinja_template("pelicanconf.py.jinja2", conf_python, "pelicanconf.py")
render_jinja_template('publishconf.py.jinja2', CONF, 'publishconf.py') render_jinja_template("publishconf.py.jinja2", CONF, "publishconf.py")
if automation: if automation:
render_jinja_template('tasks.py.jinja2', CONF, 'tasks.py') render_jinja_template("tasks.py.jinja2", CONF, "tasks.py")
render_jinja_template('Makefile.jinja2', CONF, 'Makefile') render_jinja_template("Makefile.jinja2", CONF, "Makefile")
print('Done. Your new project is available at %s' % CONF['basedir']) print("Done. Your new project is available at %s" % CONF["basedir"])
if __name__ == "__main__": if __name__ == "__main__":

View file

@@ -8,7 +8,7 @@ import sys
def err(msg, die=None): def err(msg, die=None):
"""Print an error message and exits if an exit code is given""" """Print an error message and exits if an exit code is given"""
sys.stderr.write(msg + '\n') sys.stderr.write(msg + "\n")
if die: if die:
sys.exit(die if isinstance(die, int) else 1) sys.exit(die if isinstance(die, int) else 1)
@@ -16,62 +16,96 @@ def err(msg, die=None):
try: try:
import pelican import pelican
except ImportError: except ImportError:
err('Cannot import pelican.\nYou must ' err(
'install Pelican in order to run this script.', "Cannot import pelican.\nYou must "
-1) "install Pelican in order to run this script.",
-1,
)
global _THEMES_PATH global _THEMES_PATH
_THEMES_PATH = os.path.join( _THEMES_PATH = os.path.join(
os.path.dirname( os.path.dirname(os.path.abspath(pelican.__file__)), "themes"
os.path.abspath(pelican.__file__)
),
'themes'
) )
__version__ = '0.2' __version__ = "0.2"
_BUILTIN_THEMES = ['simple', 'notmyidea'] _BUILTIN_THEMES = ["simple", "notmyidea"]
def main(): def main():
"""Main function""" """Main function"""
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(description="""Install themes for Pelican""")
description="""Install themes for Pelican""")
excl = parser.add_mutually_exclusive_group() excl = parser.add_mutually_exclusive_group()
excl.add_argument( excl.add_argument(
'-l', '--list', dest='action', action="store_const", const='list', "-l",
help="Show the themes already installed and exit") "--list",
dest="action",
action="store_const",
const="list",
help="Show the themes already installed and exit",
)
excl.add_argument( excl.add_argument(
'-p', '--path', dest='action', action="store_const", const='path', "-p",
help="Show the themes path and exit") "--path",
dest="action",
action="store_const",
const="path",
help="Show the themes path and exit",
)
excl.add_argument( excl.add_argument(
'-V', '--version', action='version', "-V",
version='pelican-themes v{}'.format(__version__), "--version",
help='Print the version of this script') action="version",
version="pelican-themes v{}".format(__version__),
help="Print the version of this script",
)
parser.add_argument( parser.add_argument(
'-i', '--install', dest='to_install', nargs='+', metavar="theme path", "-i",
help='The themes to install') "--install",
dest="to_install",
nargs="+",
metavar="theme path",
help="The themes to install",
)
parser.add_argument( parser.add_argument(
'-r', '--remove', dest='to_remove', nargs='+', metavar="theme name", "-r",
help='The themes to remove') "--remove",
dest="to_remove",
nargs="+",
metavar="theme name",
help="The themes to remove",
)
parser.add_argument( parser.add_argument(
'-U', '--upgrade', dest='to_upgrade', nargs='+', "-U",
metavar="theme path", help='The themes to upgrade') "--upgrade",
dest="to_upgrade",
nargs="+",
metavar="theme path",
help="The themes to upgrade",
)
parser.add_argument( parser.add_argument(
'-s', '--symlink', dest='to_symlink', nargs='+', metavar="theme path", "-s",
"--symlink",
dest="to_symlink",
nargs="+",
metavar="theme path",
help="Same as `--install', but create a symbolic link instead of " help="Same as `--install', but create a symbolic link instead of "
"copying the theme. Useful for theme development") "copying the theme. Useful for theme development",
)
parser.add_argument( parser.add_argument(
'-c', '--clean', dest='clean', action="store_true", "-c",
help="Remove the broken symbolic links of the theme path") "--clean",
dest="clean",
action="store_true",
help="Remove the broken symbolic links of the theme path",
)
parser.add_argument( parser.add_argument(
'-v', '--verbose', dest='verbose', "-v", "--verbose", dest="verbose", action="store_true", help="Verbose output"
action="store_true", )
help="Verbose output")
args = parser.parse_args() args = parser.parse_args()
@@ -79,46 +113,46 @@ def main():
to_sym = args.to_symlink or args.clean to_sym = args.to_symlink or args.clean
if args.action: if args.action:
if args.action == 'list': if args.action == "list":
list_themes(args.verbose) list_themes(args.verbose)
elif args.action == 'path': elif args.action == "path":
print(_THEMES_PATH) print(_THEMES_PATH)
elif to_install or args.to_remove or to_sym: elif to_install or args.to_remove or to_sym:
if args.to_remove: if args.to_remove:
if args.verbose: if args.verbose:
print('Removing themes...') print("Removing themes...")
for i in args.to_remove: for i in args.to_remove:
remove(i, v=args.verbose) remove(i, v=args.verbose)
if args.to_install: if args.to_install:
if args.verbose: if args.verbose:
print('Installing themes...') print("Installing themes...")
for i in args.to_install: for i in args.to_install:
install(i, v=args.verbose) install(i, v=args.verbose)
if args.to_upgrade: if args.to_upgrade:
if args.verbose: if args.verbose:
print('Upgrading themes...') print("Upgrading themes...")
for i in args.to_upgrade: for i in args.to_upgrade:
install(i, v=args.verbose, u=True) install(i, v=args.verbose, u=True)
if args.to_symlink: if args.to_symlink:
if args.verbose: if args.verbose:
print('Linking themes...') print("Linking themes...")
for i in args.to_symlink: for i in args.to_symlink:
symlink(i, v=args.verbose) symlink(i, v=args.verbose)
if args.clean: if args.clean:
if args.verbose: if args.verbose:
print('Cleaning the themes directory...') print("Cleaning the themes directory...")
clean(v=args.verbose) clean(v=args.verbose)
else: else:
print('No argument given... exiting.') print("No argument given... exiting.")
def themes(): def themes():
@@ -142,7 +176,7 @@ def list_themes(v=False):
if v: if v:
print(theme_path + (" (symbolic link to `" + link_target + "')")) print(theme_path + (" (symbolic link to `" + link_target + "')"))
else: else:
print(theme_path + '@') print(theme_path + "@")
else: else:
print(theme_path) print(theme_path)
@@ -150,51 +184,52 @@ def list_themes(v=False):
def remove(theme_name, v=False): def remove(theme_name, v=False):
"""Removes a theme""" """Removes a theme"""
theme_name = theme_name.replace('/', '') theme_name = theme_name.replace("/", "")
target = os.path.join(_THEMES_PATH, theme_name) target = os.path.join(_THEMES_PATH, theme_name)
if theme_name in _BUILTIN_THEMES: if theme_name in _BUILTIN_THEMES:
err(theme_name + ' is a builtin theme.\n' err(
'You cannot remove a builtin theme with this script, ' theme_name + " is a builtin theme.\n"
'remove it by hand if you want.') "You cannot remove a builtin theme with this script, "
"remove it by hand if you want."
)
elif os.path.islink(target): elif os.path.islink(target):
if v: if v:
print('Removing link `' + target + "'") print("Removing link `" + target + "'")
os.remove(target) os.remove(target)
elif os.path.isdir(target): elif os.path.isdir(target):
if v: if v:
print('Removing directory `' + target + "'") print("Removing directory `" + target + "'")
shutil.rmtree(target) shutil.rmtree(target)
elif os.path.exists(target): elif os.path.exists(target):
err(target + ' : not a valid theme') err(target + " : not a valid theme")
else: else:
err(target + ' : no such file or directory') err(target + " : no such file or directory")
def install(path, v=False, u=False): def install(path, v=False, u=False):
"""Installs a theme""" """Installs a theme"""
if not os.path.exists(path): if not os.path.exists(path):
err(path + ' : no such file or directory') err(path + " : no such file or directory")
elif not os.path.isdir(path): elif not os.path.isdir(path):
err(path + ' : not a directory') err(path + " : not a directory")
else: else:
theme_name = os.path.basename(os.path.normpath(path)) theme_name = os.path.basename(os.path.normpath(path))
theme_path = os.path.join(_THEMES_PATH, theme_name) theme_path = os.path.join(_THEMES_PATH, theme_name)
exists = os.path.exists(theme_path) exists = os.path.exists(theme_path)
if exists and not u: if exists and not u:
err(path + ' : already exists') err(path + " : already exists")
elif exists: elif exists:
remove(theme_name, v) remove(theme_name, v)
install(path, v) install(path, v)
else: else:
if v: if v:
print("Copying '{p}' to '{t}' ...".format(p=path, print("Copying '{p}' to '{t}' ...".format(p=path, t=theme_path))
t=theme_path))
try: try:
shutil.copytree(path, theme_path) shutil.copytree(path, theme_path)
try: try:
if os.name == 'posix': if os.name == "posix":
for root, dirs, files in os.walk(theme_path): for root, dirs, files in os.walk(theme_path):
for d in dirs: for d in dirs:
dname = os.path.join(root, d) dname = os.path.join(root, d)
@@ -203,35 +238,41 @@ def install(path, v=False, u=False):
fname = os.path.join(root, f) fname = os.path.join(root, f)
os.chmod(fname, 420) # 0o644 os.chmod(fname, 420) # 0o644
except OSError as e: except OSError as e:
err("Cannot change permissions of files " err(
"or directory in `{r}':\n{e}".format(r=theme_path, "Cannot change permissions of files "
e=str(e)), "or directory in `{r}':\n{e}".format(r=theme_path, e=str(e)),
die=False) die=False,
)
except Exception as e: except Exception as e:
err("Cannot copy `{p}' to `{t}':\n{e}".format( err(
p=path, t=theme_path, e=str(e))) "Cannot copy `{p}' to `{t}':\n{e}".format(
p=path, t=theme_path, e=str(e)
)
)
def symlink(path, v=False): def symlink(path, v=False):
"""Symbolically link a theme""" """Symbolically link a theme"""
if not os.path.exists(path): if not os.path.exists(path):
err(path + ' : no such file or directory') err(path + " : no such file or directory")
elif not os.path.isdir(path): elif not os.path.isdir(path):
err(path + ' : not a directory') err(path + " : not a directory")
else: else:
theme_name = os.path.basename(os.path.normpath(path)) theme_name = os.path.basename(os.path.normpath(path))
theme_path = os.path.join(_THEMES_PATH, theme_name) theme_path = os.path.join(_THEMES_PATH, theme_name)
if os.path.exists(theme_path): if os.path.exists(theme_path):
err(path + ' : already exists') err(path + " : already exists")
else: else:
if v: if v:
print("Linking `{p}' to `{t}' ...".format( print("Linking `{p}' to `{t}' ...".format(p=path, t=theme_path))
p=path, t=theme_path))
try: try:
os.symlink(path, theme_path) os.symlink(path, theme_path)
except Exception as e: except Exception as e:
err("Cannot link `{p}' to `{t}':\n{e}".format( err(
p=path, t=theme_path, e=str(e))) "Cannot link `{p}' to `{t}':\n{e}".format(
p=path, t=theme_path, e=str(e)
)
)
def is_broken_link(path): def is_broken_link(path):
@@ -247,11 +288,11 @@ def clean(v=False):
path = os.path.join(_THEMES_PATH, path) path = os.path.join(_THEMES_PATH, path)
if os.path.islink(path) and is_broken_link(path): if os.path.islink(path) and is_broken_link(path):
if v: if v:
print('Removing {}'.format(path)) print("Removing {}".format(path))
try: try:
os.remove(path) os.remove(path)
except OSError: except OSError:
print('Error: cannot remove {}'.format(path)) print("Error: cannot remove {}".format(path))
else: else:
c += 1 c += 1

View file

@@ -31,17 +31,16 @@ class URLWrapper:
@property @property
def slug(self): def slug(self):
if self._slug is None: if self._slug is None:
class_key = '{}_REGEX_SUBSTITUTIONS'.format( class_key = "{}_REGEX_SUBSTITUTIONS".format(self.__class__.__name__.upper())
self.__class__.__name__.upper())
regex_subs = self.settings.get( regex_subs = self.settings.get(
class_key, class_key, self.settings.get("SLUG_REGEX_SUBSTITUTIONS", [])
self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])) )
preserve_case = self.settings.get('SLUGIFY_PRESERVE_CASE', False) preserve_case = self.settings.get("SLUGIFY_PRESERVE_CASE", False)
self._slug = slugify( self._slug = slugify(
self.name, self.name,
regex_subs=regex_subs, regex_subs=regex_subs,
preserve_case=preserve_case, preserve_case=preserve_case,
use_unicode=self.settings.get('SLUGIFY_USE_UNICODE', False) use_unicode=self.settings.get("SLUGIFY_USE_UNICODE", False),
) )
return self._slug return self._slug
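The lookup key for the slug property is derived from the class name, so a Tag consults TAG_REGEX_SUBSTITUTIONS and a Category consults CATEGORY_REGEX_SUBSTITUTIONS, each falling back to SLUG_REGEX_SUBSTITUTIONS. The derivation in isolation (the function name here is hypothetical):

def regex_subs_for(cls_name, settings):
    class_key = "{}_REGEX_SUBSTITUTIONS".format(cls_name.upper())
    return settings.get(class_key, settings.get("SLUG_REGEX_SUBSTITUTIONS", []))

assert regex_subs_for("Tag", {"TAG_REGEX_SUBSTITUTIONS": [("a", "b")]}) == [("a", "b")]
assert regex_subs_for("Tag", {"SLUG_REGEX_SUBSTITUTIONS": [("c", "d")]}) == [("c", "d")]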
@@ -53,26 +52,26 @@ class URLWrapper:
def as_dict(self): def as_dict(self):
d = self.__dict__ d = self.__dict__
d['name'] = self.name d["name"] = self.name
d['slug'] = self.slug d["slug"] = self.slug
return d return d
def __hash__(self): def __hash__(self):
return hash(self.slug) return hash(self.slug)
def _normalize_key(self, key): def _normalize_key(self, key):
class_key = '{}_REGEX_SUBSTITUTIONS'.format( class_key = "{}_REGEX_SUBSTITUTIONS".format(self.__class__.__name__.upper())
self.__class__.__name__.upper())
regex_subs = self.settings.get( regex_subs = self.settings.get(
class_key, class_key, self.settings.get("SLUG_REGEX_SUBSTITUTIONS", [])
self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])) )
use_unicode = self.settings.get('SLUGIFY_USE_UNICODE', False) use_unicode = self.settings.get("SLUGIFY_USE_UNICODE", False)
preserve_case = self.settings.get('SLUGIFY_PRESERVE_CASE', False) preserve_case = self.settings.get("SLUGIFY_PRESERVE_CASE", False)
return slugify( return slugify(
key, key,
regex_subs=regex_subs, regex_subs=regex_subs,
preserve_case=preserve_case, preserve_case=preserve_case,
use_unicode=use_unicode) use_unicode=use_unicode,
)
def __eq__(self, other): def __eq__(self, other):
if isinstance(other, self.__class__): if isinstance(other, self.__class__):
@@ -99,7 +98,7 @@ class URLWrapper:
return self.name return self.name
def __repr__(self): def __repr__(self):
return '<{} {}>'.format(type(self).__name__, repr(self._name)) return "<{} {}>".format(type(self).__name__, repr(self._name))
def _from_settings(self, key, get_page_name=False): def _from_settings(self, key, get_page_name=False):
"""Returns URL information as defined in settings. """Returns URL information as defined in settings.
@@ -114,7 +113,7 @@ class URLWrapper:
if isinstance(value, pathlib.Path): if isinstance(value, pathlib.Path):
value = str(value) value = str(value)
if not isinstance(value, str): if not isinstance(value, str):
logger.warning('%s is set to %s', setting, value) logger.warning("%s is set to %s", setting, value)
return value return value
else: else:
if get_page_name: if get_page_name:
@@ -122,10 +121,11 @@ class URLWrapper:
else: else:
return value.format(**self.as_dict()) return value.format(**self.as_dict())
page_name = property(functools.partial(_from_settings, key='URL', page_name = property(
get_page_name=True)) functools.partial(_from_settings, key="URL", get_page_name=True)
url = property(functools.partial(_from_settings, key='URL')) )
save_as = property(functools.partial(_from_settings, key='SAVE_AS')) url = property(functools.partial(_from_settings, key="URL"))
save_as = property(functools.partial(_from_settings, key="SAVE_AS"))
class Category(URLWrapper): class Category(URLWrapper):

View file

@@ -32,38 +32,37 @@ logger = logging.getLogger(__name__)
def sanitised_join(base_directory, *parts): def sanitised_join(base_directory, *parts):
joined = posixize_path( joined = posixize_path(os.path.abspath(os.path.join(base_directory, *parts)))
os.path.abspath(os.path.join(base_directory, *parts)))
base = posixize_path(os.path.abspath(base_directory)) base = posixize_path(os.path.abspath(base_directory))
if not joined.startswith(base): if not joined.startswith(base):
raise RuntimeError( raise RuntimeError(
"Attempted to break out of output directory to {}".format( "Attempted to break out of output directory to {}".format(joined)
joined
)
) )
return joined return joined
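sanitised_join normalizes the joined path and rejects anything that resolves outside the base directory. A usage sketch on a POSIX system (the paths are hypothetical):

from pelican.utils import sanitised_join

assert sanitised_join("/site/output", "posts", "a.html") == "/site/output/posts/a.html"

try:
    sanitised_join("/site/output", "..", "..", "etc", "passwd")
except RuntimeError as exc:
    print(exc)  # Attempted to break out of output directory to /etc/passwd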
def strftime(date, date_format): def strftime(date, date_format):
''' """
Enhanced replacement for built-in strftime with zero stripping Enhanced replacement for built-in strftime with zero stripping
This works by 'grabbing' possible format strings (those starting with %), This works by 'grabbing' possible format strings (those starting with %),
formatting them with the date, stripping any leading zeros if - prefix is formatting them with the date, stripping any leading zeros if - prefix is
used and replacing formatted output back. used and replacing formatted output back.
''' """
def strip_zeros(x): def strip_zeros(x):
return x.lstrip('0') or '0' return x.lstrip("0") or "0"
# includes ISO date parameters added by Python 3.6 # includes ISO date parameters added by Python 3.6
c89_directives = 'aAbBcdfGHIjmMpSUuVwWxXyYzZ%' c89_directives = "aAbBcdfGHIjmMpSUuVwWxXyYzZ%"
# grab candidate format options # grab candidate format options
format_options = '%[-]?.' format_options = "%[-]?."
candidates = re.findall(format_options, date_format) candidates = re.findall(format_options, date_format)
# replace candidates with placeholders for later % formatting # replace candidates with placeholders for later % formatting
template = re.sub(format_options, '%s', date_format) template = re.sub(format_options, "%s", date_format)
formatted_candidates = [] formatted_candidates = []
for candidate in candidates: for candidate in candidates:
@@ -72,7 +71,7 @@ def strftime(date, date_format):
# check for '-' prefix # check for '-' prefix
if len(candidate) == 3: if len(candidate) == 3:
# '-' prefix # '-' prefix
candidate = '%{}'.format(candidate[-1]) candidate = "%{}".format(candidate[-1])
conversion = strip_zeros conversion = strip_zeros
else: else:
conversion = None conversion = None
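The '-' prefix handled here strips leading zeros from a single directive, a GNU strftime extension that the standard library only honors on some platforms. A quick illustration in an English locale, assuming pelican.utils is importable:

from datetime import date
from pelican.utils import strftime

d = date(2023, 7, 5)
print(strftime(d, "%d %B %Y"))   # 05 July 2023
print(strftime(d, "%-d %B %Y"))  # 5 July 2023, leading zero stripped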
@@ -95,10 +94,10 @@ def strftime(date, date_format):
class SafeDatetime(datetime.datetime): class SafeDatetime(datetime.datetime):
'''Subclass of datetime that works with utf-8 format strings on PY2''' """Subclass of datetime that works with utf-8 format strings on PY2"""
def strftime(self, fmt, safe=True): def strftime(self, fmt, safe=True):
'''Uses our custom strftime if supposed to be *safe*''' """Uses our custom strftime if supposed to be *safe*"""
if safe: if safe:
return strftime(self, fmt) return strftime(self, fmt)
else: else:
@@ -106,22 +105,21 @@ class SafeDatetime(datetime.datetime):
class DateFormatter: class DateFormatter:
'''A date formatter object used as a jinja filter """A date formatter object used as a jinja filter
Uses the `strftime` implementation and makes sure jinja uses the locale Uses the `strftime` implementation and makes sure jinja uses the locale
defined in LOCALE setting defined in LOCALE setting
''' """
def __init__(self): def __init__(self):
self.locale = locale.setlocale(locale.LC_TIME) self.locale = locale.setlocale(locale.LC_TIME)
def __call__(self, date, date_format): def __call__(self, date, date_format):
# on OSX, encoding from LC_CTYPE determines the unicode output in PY3 # on OSX, encoding from LC_CTYPE determines the unicode output in PY3
# make sure it's same as LC_TIME # make sure it's same as LC_TIME
with temporary_locale(self.locale, locale.LC_TIME), \ with temporary_locale(self.locale, locale.LC_TIME), temporary_locale(
temporary_locale(self.locale, locale.LC_CTYPE): self.locale, locale.LC_CTYPE
):
formatted = strftime(date, date_format) formatted = strftime(date, date_format)
return formatted return formatted
@@ -155,7 +153,7 @@ class memoized:
return self.func.__doc__ return self.func.__doc__
def __get__(self, obj, objtype): def __get__(self, obj, objtype):
'''Support instance methods.''' """Support instance methods."""
fn = partial(self.__call__, obj) fn = partial(self.__call__, obj)
fn.cache = self.cache fn.cache = self.cache
return fn return fn
@@ -177,17 +175,16 @@ def deprecated_attribute(old, new, since=None, remove=None, doc=None):
Note that the decorator needs a dummy method to attach to, but the Note that the decorator needs a dummy method to attach to, but the
content of the dummy method is ignored. content of the dummy method is ignored.
""" """
def _warn(): def _warn():
version = '.'.join(str(x) for x in since) version = ".".join(str(x) for x in since)
message = ['{} has been deprecated since {}'.format(old, version)] message = ["{} has been deprecated since {}".format(old, version)]
if remove: if remove:
version = '.'.join(str(x) for x in remove) version = ".".join(str(x) for x in remove)
message.append( message.append(" and will be removed by version {}".format(version))
' and will be removed by version {}'.format(version)) message.append(". Use {} instead.".format(new))
message.append('. Use {} instead.'.format(new)) logger.warning("".join(message))
logger.warning(''.join(message)) logger.debug("".join(str(x) for x in traceback.format_stack()))
logger.debug(''.join(str(x) for x
in traceback.format_stack()))
def fget(self): def fget(self):
_warn() _warn()
@@ -208,21 +205,20 @@ def get_date(string):
If no format matches the given date, raise a ValueError. If no format matches the given date, raise a ValueError.
""" """
string = re.sub(' +', ' ', string) string = re.sub(" +", " ", string)
default = SafeDatetime.now().replace(hour=0, minute=0, default = SafeDatetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
second=0, microsecond=0)
try: try:
return dateutil.parser.parse(string, default=default) return dateutil.parser.parse(string, default=default)
except (TypeError, ValueError): except (TypeError, ValueError):
raise ValueError('{!r} is not a valid date'.format(string)) raise ValueError("{!r} is not a valid date".format(string))
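get_date defaults the time-of-day fields to midnight, so a bare date parses deterministically, and anything dateutil cannot parse surfaces as ValueError. A quick check, assuming pelican.utils is importable:

from pelican.utils import get_date

dt = get_date("2023-10-29")
assert (dt.year, dt.month, dt.day, dt.hour, dt.minute) == (2023, 10, 29, 0, 0)

try:
    get_date("not a date")
except ValueError as exc:
    print(exc)  # 'not a date' is not a valid date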
@contextmanager @contextmanager
def pelican_open(filename, mode='r', strip_crs=(sys.platform == 'win32')): def pelican_open(filename, mode="r", strip_crs=(sys.platform == "win32")):
"""Open a file and return its content""" """Open a file and return its content"""
# utf-8-sig will clear any BOM if present # utf-8-sig will clear any BOM if present
with open(filename, mode, encoding='utf-8-sig') as infile: with open(filename, mode, encoding="utf-8-sig") as infile:
content = infile.read() content = infile.read()
yield content yield content
@@ -244,7 +240,7 @@ def slugify(value, regex_subs=(), preserve_case=False, use_unicode=False):
def normalize_unicode(text): def normalize_unicode(text):
# normalize text by compatibility composition # normalize text by compatibility composition
# see: https://en.wikipedia.org/wiki/Unicode_equivalence # see: https://en.wikipedia.org/wiki/Unicode_equivalence
return unicodedata.normalize('NFKC', text) return unicodedata.normalize("NFKC", text)
# strip tags from value # strip tags from value
value = Markup(value).striptags() value = Markup(value).striptags()
@@ -259,10 +255,8 @@ def slugify(value, regex_subs=(), preserve_case=False, use_unicode=False):
# perform regex substitutions # perform regex substitutions
for src, dst in regex_subs: for src, dst in regex_subs:
value = re.sub( value = re.sub(
normalize_unicode(src), normalize_unicode(src), normalize_unicode(dst), value, flags=re.IGNORECASE
normalize_unicode(dst), )
value,
flags=re.IGNORECASE)
if not preserve_case: if not preserve_case:
value = value.lower() value = value.lower()
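Because the substitutions run case-insensitively before the final lowercasing, a list like the default substitutions plus one escaped custom pair yields the usual kebab-case slug. For example, assuming pelican.utils is importable:

from pelican.utils import slugify

subs = [
    (r"C\+\+", "cpp"),   # custom pair, regex-escaped
    (r"[^\w\s-]", ""),   # drop punctuation
    (r"(?u)\A\s*", ""),  # strip leading whitespace
    (r"(?u)\s*\Z", ""),  # strip trailing whitespace
    (r"[-\s]+", "-"),    # collapse separators into hyphens
]
assert slugify("My C++ Posts!", regex_subs=subs) == "my-cpp-posts"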
@@ -283,8 +277,7 @@ def copy(source, destination, ignores=None):
""" """
def walk_error(err): def walk_error(err):
logger.warning("While copying %s: %s: %s", logger.warning("While copying %s: %s: %s", source_, err.filename, err.strerror)
source_, err.filename, err.strerror)
source_ = os.path.abspath(os.path.expanduser(source)) source_ = os.path.abspath(os.path.expanduser(source))
destination_ = os.path.abspath(os.path.expanduser(destination)) destination_ = os.path.abspath(os.path.expanduser(destination))
@@ -292,39 +285,40 @@ def copy(source, destination, ignores=None):
if ignores is None: if ignores is None:
ignores = [] ignores = []
if any(fnmatch.fnmatch(os.path.basename(source), ignore) if any(fnmatch.fnmatch(os.path.basename(source), ignore) for ignore in ignores):
for ignore in ignores): logger.info("Not copying %s due to ignores", source_)
logger.info('Not copying %s due to ignores', source_)
return return
if os.path.isfile(source_): if os.path.isfile(source_):
dst_dir = os.path.dirname(destination_) dst_dir = os.path.dirname(destination_)
if not os.path.exists(dst_dir): if not os.path.exists(dst_dir):
logger.info('Creating directory %s', dst_dir) logger.info("Creating directory %s", dst_dir)
os.makedirs(dst_dir) os.makedirs(dst_dir)
logger.info('Copying %s to %s', source_, destination_) logger.info("Copying %s to %s", source_, destination_)
copy_file(source_, destination_) copy_file(source_, destination_)
elif os.path.isdir(source_): elif os.path.isdir(source_):
if not os.path.exists(destination_): if not os.path.exists(destination_):
logger.info('Creating directory %s', destination_) logger.info("Creating directory %s", destination_)
os.makedirs(destination_) os.makedirs(destination_)
if not os.path.isdir(destination_): if not os.path.isdir(destination_):
logger.warning('Cannot copy %s (a directory) to %s (a file)', logger.warning(
source_, destination_) "Cannot copy %s (a directory) to %s (a file)", source_, destination_
)
return return
for src_dir, subdirs, others in os.walk(source_, followlinks=True): for src_dir, subdirs, others in os.walk(source_, followlinks=True):
dst_dir = os.path.join(destination_, dst_dir = os.path.join(destination_, os.path.relpath(src_dir, source_))
os.path.relpath(src_dir, source_))
subdirs[:] = (s for s in subdirs if not any(fnmatch.fnmatch(s, i) subdirs[:] = (
for i in ignores)) s for s in subdirs if not any(fnmatch.fnmatch(s, i) for i in ignores)
others[:] = (o for o in others if not any(fnmatch.fnmatch(o, i) )
for i in ignores)) others[:] = (
o for o in others if not any(fnmatch.fnmatch(o, i) for i in ignores)
)
if not os.path.isdir(dst_dir): if not os.path.isdir(dst_dir):
logger.info('Creating directory %s', dst_dir) logger.info("Creating directory %s", dst_dir)
# Parent directories are known to exist, so 'mkdir' suffices. # Parent directories are known to exist, so 'mkdir' suffices.
os.mkdir(dst_dir) os.mkdir(dst_dir)
@@ -332,21 +326,24 @@ def copy(source, destination, ignores=None):
src_path = os.path.join(src_dir, o) src_path = os.path.join(src_dir, o)
dst_path = os.path.join(dst_dir, o) dst_path = os.path.join(dst_dir, o)
if os.path.isfile(src_path): if os.path.isfile(src_path):
logger.info('Copying %s to %s', src_path, dst_path) logger.info("Copying %s to %s", src_path, dst_path)
copy_file(src_path, dst_path) copy_file(src_path, dst_path)
else: else:
logger.warning('Skipped copy %s (not a file or ' logger.warning(
'directory) to %s', "Skipped copy %s (not a file or " "directory) to %s",
src_path, dst_path) src_path,
dst_path,
)
def copy_file(source, destination): def copy_file(source, destination):
'''Copy a file''' """Copy a file"""
try: try:
shutil.copyfile(source, destination) shutil.copyfile(source, destination)
except OSError as e: except OSError as e:
logger.warning("A problem occurred copying file %s to %s; %s", logger.warning(
source, destination, e) "A problem occurred copying file %s to %s; %s", source, destination, e
)
def clean_output_dir(path, retention): def clean_output_dir(path, retention):
@@ -367,15 +364,15 @@ def clean_output_dir(path, retention):
for filename in os.listdir(path): for filename in os.listdir(path):
file = os.path.join(path, filename) file = os.path.join(path, filename)
if any(filename == retain for retain in retention): if any(filename == retain for retain in retention):
logger.debug("Skipping deletion; %s is on retention list: %s", logger.debug(
filename, file) "Skipping deletion; %s is on retention list: %s", filename, file
)
elif os.path.isdir(file): elif os.path.isdir(file):
try: try:
shutil.rmtree(file) shutil.rmtree(file)
logger.debug("Deleted directory %s", file) logger.debug("Deleted directory %s", file)
except Exception as e: except Exception as e:
logger.error("Unable to delete directory %s; %s", logger.error("Unable to delete directory %s; %s", file, e)
file, e)
elif os.path.isfile(file) or os.path.islink(file): elif os.path.isfile(file) or os.path.islink(file):
try: try:
os.remove(file) os.remove(file)
@@ -407,29 +404,31 @@ def posixize_path(rel_path):
"""Use '/' as path separator, so that source references, """Use '/' as path separator, so that source references,
like '{static}/foo/bar.jpg' or 'extras/favicon.ico', like '{static}/foo/bar.jpg' or 'extras/favicon.ico',
will work on Windows as well as on Mac and Linux.""" will work on Windows as well as on Mac and Linux."""
return rel_path.replace(os.sep, '/') return rel_path.replace(os.sep, "/")
class _HTMLWordTruncator(HTMLParser): class _HTMLWordTruncator(HTMLParser):
_word_regex = re.compile(
_word_regex = re.compile(r"{DBC}|(\w[\w'-]*)".format( r"{DBC}|(\w[\w'-]*)".format(
# DBC means CJK-like characters. A character can stand for a word. # DBC means CJK-like characters. A character can stand for a word.
DBC=("([\u4E00-\u9FFF])|" # CJK Unified Ideographs DBC=(
"([\u3400-\u4DBF])|" # CJK Unified Ideographs Extension A "([\u4E00-\u9FFF])|" # CJK Unified Ideographs
"([\uF900-\uFAFF])|" # CJK Compatibility Ideographs "([\u3400-\u4DBF])|" # CJK Unified Ideographs Extension A
"([\U00020000-\U0002A6DF])|" # CJK Unified Ideographs Extension B "([\uF900-\uFAFF])|" # CJK Compatibility Ideographs
"([\U0002F800-\U0002FA1F])|" # CJK Compatibility Ideographs Supplement "([\U00020000-\U0002A6DF])|" # CJK Unified Ideographs Extension B
"([\u3040-\u30FF])|" # Hiragana and Katakana "([\U0002F800-\U0002FA1F])|" # CJK Compatibility Ideographs Supplement
"([\u1100-\u11FF])|" # Hangul Jamo "([\u3040-\u30FF])|" # Hiragana and Katakana
"([\uAC00-\uD7FF])|" # Hangul Compatibility Jamo "([\u1100-\u11FF])|" # Hangul Jamo
"([\u3130-\u318F])" # Hangul Syllables "([\uAC00-\uD7FF])|" # Hangul Compatibility Jamo
)), re.UNICODE) "([\u3130-\u318F])" # Hangul Syllables
_word_prefix_regex = re.compile(r'\w', re.U) )
_singlets = ('br', 'col', 'link', 'base', 'img', 'param', 'area', ),
'hr', 'input') re.UNICODE,
)
_word_prefix_regex = re.compile(r"\w", re.U)
_singlets = ("br", "col", "link", "base", "img", "param", "area", "hr", "input")
class TruncationCompleted(Exception): class TruncationCompleted(Exception):
def __init__(self, truncate_at): def __init__(self, truncate_at):
super().__init__(truncate_at) super().__init__(truncate_at)
self.truncate_at = truncate_at self.truncate_at = truncate_at
@ -455,7 +454,7 @@ class _HTMLWordTruncator(HTMLParser):
        line_start = 0
        lineno, line_offset = self.getpos()
        for i in range(lineno - 1):
            line_start = self.rawdata.index("\n", line_start) + 1
        return line_start + line_offset

    def add_word(self, word_end):
@ -482,7 +481,7 @@ class _HTMLWordTruncator(HTMLParser):
        else:
            # SGML: An end tag closes, back to the matching start tag,
            # all unclosed intervening start tags with omitted end tags
            del self.open_tags[: i + 1]

    def handle_data(self, data):
        word_end = 0
@ -531,7 +530,7 @@ class _HTMLWordTruncator(HTMLParser):
        ref_end = offset + len(name) + 1

        try:
            if self.rawdata[ref_end] == ";":
                ref_end += 1
        except IndexError:
            # We are at the end of the string and there's no ';'
@ -556,7 +555,7 @@ class _HTMLWordTruncator(HTMLParser):
            codepoint = entities.name2codepoint[name]
            char = chr(codepoint)
        except KeyError:
            char = ""
        self._handle_ref(name, char)

    def handle_charref(self, name):
@ -567,17 +566,17 @@ class _HTMLWordTruncator(HTMLParser):
        `#x2014`)
        """
        try:
            if name.startswith("x"):
                codepoint = int(name[1:], 16)
            else:
                codepoint = int(name)
            char = chr(codepoint)
        except (ValueError, OverflowError):
            char = ""
        self._handle_ref("#" + name, char)


def truncate_html_words(s, num, end_text="…"):
    """Truncates HTML to a certain number of words.

    (not counting tags and comments). Closes opened tags if they were correctly
@ -588,23 +587,23 @@ def truncate_html_words(s, num, end_text='…'):
""" """
length = int(num) length = int(num)
if length <= 0: if length <= 0:
return '' return ""
truncator = _HTMLWordTruncator(length) truncator = _HTMLWordTruncator(length)
truncator.feed(s) truncator.feed(s)
if truncator.truncate_at is None: if truncator.truncate_at is None:
return s return s
out = s[:truncator.truncate_at] out = s[: truncator.truncate_at]
if end_text: if end_text:
out += ' ' + end_text out += " " + end_text
# Close any tags still open # Close any tags still open
for tag in truncator.open_tags: for tag in truncator.open_tags:
out += '</%s>' % tag out += "</%s>" % tag
# Return string # Return string
return out return out
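
# Usage sketch (illustrative comment, not from the source file), assuming the
# default end_text="…":
#
#     truncate_html_words("<p>one two three four</p>", 2)
#     # -> '<p>one two …</p>'
#
# Words inside tags and comments are not counted, and any tags still open at
# the cut point are closed again at the end.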


def process_translations(content_list, translation_id=None):
    """Finds translations and returns them.

    For each content_list item, populates the 'translations' attribute, and
    returns a tuple with two lists (index, translations). Index list includes
@ -632,19 +631,23 @@ def process_translations(content_list, translation_id=None):
    try:
        content_list.sort(key=attrgetter(*translation_id))
    except TypeError:
        raise TypeError(
            "Cannot unpack {}, 'translation_id' must be falsy, a"
            " string or a collection of strings".format(translation_id)
        )
    except AttributeError:
        raise AttributeError(
            "Cannot use {} as 'translation_id', there "
            "appear to be items without these metadata "
            "attributes".format(translation_id)
        )

    for id_vals, items in groupby(content_list, attrgetter(*translation_id)):
        # prepare warning string
        id_vals = (id_vals,) if len(translation_id) == 1 else id_vals
        with_str = "with" + ", ".join([' {} "{{}}"'] * len(translation_id)).format(
            *translation_id
        ).format(*id_vals)

        items = list(items)
        original_items = get_original_items(items, with_str)
@ -662,24 +665,24 @@ def get_original_items(items, with_str):
        args = [len(items)]
        args.extend(extra)
        args.extend(x.source_path for x in items)
        logger.warning("{}: {}".format(msg, "\n%s" * len(items)), *args)

    # warn if several items have the same lang
    for lang, lang_items in groupby(items, attrgetter("lang")):
        lang_items = list(lang_items)
        if len(lang_items) > 1:
            _warn_source_paths(
                'There are %s items "%s" with lang %s', lang_items, with_str, lang
            )

    # items with `translation` metadata will be used as translations...
    candidate_items = [
        i for i in items if i.metadata.get("translation", "false").lower() == "false"
    ]

    # ...unless all items with that slug are translations
    if not candidate_items:
        _warn_source_paths('All items ("%s") "%s" are translations', items, with_str)
        candidate_items = items

    # find items with default language
@ -691,13 +694,14 @@ def get_original_items(items, with_str):
    # warn if there are several original items
    if len(original_items) > 1:
        _warn_source_paths(
            "There are %s original (not translated) items %s", original_items, with_str
        )
    return original_items


def order_content(content_list, order_by="slug"):
    """Sorts content.

    order_by can be a string of an attribute or sorting function. If order_by
    is defined, content will be ordered by that attribute or sorting function.
@ -713,22 +717,22 @@ def order_content(content_list, order_by='slug'):
        try:
            content_list.sort(key=order_by)
        except Exception:
            logger.error("Error sorting with function %s", order_by)
    elif isinstance(order_by, str):
        if order_by.startswith("reversed-"):
            order_reversed = True
            order_by = order_by.replace("reversed-", "", 1)
        else:
            order_reversed = False

        if order_by == "basename":
            content_list.sort(
                key=lambda x: os.path.basename(x.source_path or ""),
                reverse=order_reversed,
            )
        else:
            try:
                content_list.sort(key=attrgetter(order_by), reverse=order_reversed)
            except AttributeError:
                for content in content_list:
                    try:
@ -736,26 +740,31 @@ def order_content(content_list, order_by='slug'):
                    except AttributeError:
                        logger.warning(
                            'There is no "%s" attribute in "%s". '
                            "Defaulting to slug order.",
                            order_by,
                            content.get_relative_source_path(),
                            extra={
                                "limit_msg": (
                                    "More files are missing "
                                    "the needed attribute."
                                )
                            },
                        )
    else:
        logger.warning(
            "Invalid *_ORDER_BY setting (%s). "
            "Valid options are strings and functions.",
            order_by,
        )

    return content_list
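
# Usage sketch (illustrative comment): order_by accepts an attribute name such
# as "date", a "reversed-"-prefixed attribute such as "reversed-date", the
# special string "basename", or a key function, e.g.:
#
#     articles = order_content(articles, order_by="reversed-date")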


def wait_for_changes(settings_file, reader_class, settings):
    content_path = settings.get("PATH", "")
    theme_path = settings.get("THEME", "")
    ignore_files = set(
        fnmatch.translate(pattern) for pattern in settings.get("IGNORE_FILES", [])
    )

    candidate_paths = [
@ -765,7 +774,7 @@ def wait_for_changes(settings_file, reader_class, settings):
    ]

    candidate_paths.extend(
        os.path.join(content_path, path) for path in settings.get("STATIC_PATHS", [])
    )

    watching_paths = []
@ -778,11 +787,13 @@ def wait_for_changes(settings_file, reader_class, settings):
        else:
            watching_paths.append(path)

    return next(
        watchfiles.watch(
            *watching_paths,
            watch_filter=watchfiles.DefaultFilter(ignore_entity_patterns=ignore_files),
            rust_timeout=0,
        )
    )
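    # Note (illustrative comment): watchfiles.watch() yields batches of
    # (change, path) pairs, so the next() call above blocks until the first
    # batch of filesystem changes arrives and returns it.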


def set_date_tzinfo(d, tz_name=None):
@ -811,7 +822,7 @@ def split_all(path):
""" """
if isinstance(path, str): if isinstance(path, str):
components = [] components = []
path = path.lstrip('/') path = path.lstrip("/")
while path: while path:
head, tail = os.path.split(path) head, tail = os.path.split(path)
if tail: if tail:
@ -827,32 +838,30 @@ def split_all(path):
        return None
    else:
        raise TypeError(
            '"path" was {}, must be string, None, or pathlib.Path'.format(type(path))
        )


def is_selected_for_writing(settings, path):
    """Check whether path is selected for writing
    according to the WRITE_SELECTED list

    If WRITE_SELECTED is an empty list (default),
    any path is selected for writing.
    """
    if settings["WRITE_SELECTED"]:
        return path in settings["WRITE_SELECTED"]
    else:
        return True


def path_to_file_url(path):
    """Convert file-system path to file:// URL"""
    return urllib.parse.urljoin("file://", urllib.request.pathname2url(path))
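
# Usage sketch (illustrative comment; result shown for a POSIX path, since
# pathname2url encodes Windows drive letters differently):
#
#     path_to_file_url("/tmp/output")  # -> 'file:///tmp/output'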


def maybe_pluralize(count, singular, plural):
    """
    Returns a formatted string containing count and plural if count is not 1
    Returns count and singular if count is 1
@ -860,22 +869,22 @@ def maybe_pluralize(count, singular, plural):
    maybe_pluralize(1, 'Article', 'Articles') -> '1 Article'
    maybe_pluralize(2, 'Article', 'Articles') -> '2 Articles'
    """
    selection = plural
    if count == 1:
        selection = singular
    return "{} {}".format(count, selection)


@contextmanager
def temporary_locale(temp_locale=None, lc_category=locale.LC_ALL):
    """
    Enable code to run in a context with a temporary locale
    Resets the locale back when exiting context.

    Use tests.support.TestCaseWithCLocale if you want every unit test in a
    class to use the C locale.
    """
    orig_locale = locale.setlocale(lc_category)
    if temp_locale:
        locale.setlocale(lc_category, temp_locale)
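
    # Usage sketch (illustrative comment; the yield/restore half of this
    # context manager lies outside this hunk):
    #
    #     with temporary_locale("C"):
    #         ...  # locale-sensitive code runs under the C locale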
View file
@ -9,14 +9,18 @@ from markupsafe import Markup
from pelican.paginator import Paginator
from pelican.plugins import signals
from pelican.utils import (
    get_relative_path,
    is_selected_for_writing,
    path_to_url,
    sanitised_join,
    set_date_tzinfo,
)

logger = logging.getLogger(__name__)


class Writer:
    def __init__(self, output_path, settings=None):
        self.output_path = output_path
        self.reminder = dict()
@ -25,24 +29,26 @@ class Writer:
        self._overridden_files = set()

        # See Content._link_replacer for details
        if "RELATIVE_URLS" in self.settings and self.settings["RELATIVE_URLS"]:
            self.urljoiner = posix_join
        else:
            self.urljoiner = lambda base, url: urljoin(
                base if base.endswith("/") else base + "/", str(url)
            )

    def _create_new_feed(self, feed_type, feed_title, context):
        feed_class = Rss201rev2Feed if feed_type == "rss" else Atom1Feed
        if feed_title:
            feed_title = context["SITENAME"] + " - " + feed_title
        else:
            feed_title = context["SITENAME"]
        return feed_class(
            title=Markup(feed_title).striptags(),
            link=(self.site_url + "/"),
            feed_url=self.feed_url,
            description=context.get("SITESUBTITLE", ""),
            subtitle=context.get("SITESUBTITLE", None),
        )

    def _add_item_to_the_feed(self, feed, item):
        title = Markup(item.title).striptags()
@ -52,7 +58,7 @@ class Writer:
        # RSS feeds use a single tag called 'description' for both the full
        # content and the summary
        content = None
        if self.settings.get("RSS_FEED_SUMMARY_ONLY"):
            description = item.summary
        else:
            description = item.get_content(self.site_url)
@ -71,9 +77,9 @@ class Writer:
            description = None

        categories = []
        if hasattr(item, "category"):
            categories.append(item.category)
        if hasattr(item, "tags"):
            categories.extend(item.tags)

        feed.add_item(
@ -83,14 +89,12 @@ class Writer:
            description=description,
            content=content,
            categories=categories or None,
            author_name=getattr(item, "author", ""),
            pubdate=set_date_tzinfo(item.date, self.settings.get("TIMEZONE", None)),
            updateddate=set_date_tzinfo(
                item.modified, self.settings.get("TIMEZONE", None)
            )
            if hasattr(item, "modified")
            else None,
        )
@ -102,22 +106,29 @@ class Writer:
""" """
if filename in self._overridden_files: if filename in self._overridden_files:
if override: if override:
raise RuntimeError('File %s is set to be overridden twice' raise RuntimeError("File %s is set to be overridden twice" % filename)
% filename) logger.info("Skipping %s", filename)
logger.info('Skipping %s', filename)
filename = os.devnull filename = os.devnull
elif filename in self._written_files: elif filename in self._written_files:
if override: if override:
logger.info('Overwriting %s', filename) logger.info("Overwriting %s", filename)
else: else:
raise RuntimeError('File %s is to be overwritten' % filename) raise RuntimeError("File %s is to be overwritten" % filename)
if override: if override:
self._overridden_files.add(filename) self._overridden_files.add(filename)
self._written_files.add(filename) self._written_files.add(filename)
return open(filename, 'w', encoding=encoding) return open(filename, "w", encoding=encoding)
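
    # Note (illustrative comment): a path written once raises on a second
    # plain write; passing override=True permits the overwrite, while a path
    # first written with override=True causes later plain writes to be
    # skipped (routed to os.devnull).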
    def write_feed(
        self,
        elements,
        context,
        path=None,
        url=None,
        feed_type="atom",
        override_output=False,
        feed_title=None,
    ):
        """Generate a feed with the list of articles provided

        Return the feed. If no path or output_path is specified, just
@ -137,16 +148,15 @@ class Writer:
        if not is_selected_for_writing(self.settings, path):
            return

        self.site_url = context.get("SITEURL", path_to_url(get_relative_path(path)))

        self.feed_domain = context.get("FEED_DOMAIN")
        self.feed_url = self.urljoiner(self.feed_domain, url or path)

        feed = self._create_new_feed(feed_type, feed_title, context)

        # FEED_MAX_ITEMS = None means [:None] to get every element
        for element in elements[: self.settings["FEED_MAX_ITEMS"]]:
            self._add_item_to_the_feed(feed, element)

        signals.feed_generated.send(context, feed=feed)
@ -158,17 +168,25 @@ class Writer:
        except Exception:
            pass

        with self._open_w(complete_path, "utf-8", override_output) as fp:
            feed.write(fp, "utf-8")
            logger.info("Writing %s", complete_path)

            signals.feed_written.send(complete_path, context=context, feed=feed)
        return feed

    def write_file(
        self,
        name,
        template,
        context,
        relative_urls=False,
        paginated=None,
        template_name=None,
        override_output=False,
        url=None,
        **kwargs,
    ):
        """Render the template and write the file.

        :param name: name of the file to output
@ -185,10 +203,13 @@ class Writer:
        :param **kwargs: additional variables to pass to the templates
        """
        if (
            name is False
            or name == ""
            or not is_selected_for_writing(
                self.settings, os.path.join(self.output_path, name)
            )
        ):
            return
        elif not name:
            # other stuff, just return for now
@ -197,8 +218,8 @@ class Writer:
        def _write_file(template, localcontext, output_path, name, override):
            """Render the template and write the file."""
            # set localsiteurl for context so that Contents can adjust links
            if localcontext["localsiteurl"]:
                context["localsiteurl"] = localcontext["localsiteurl"]

            output = template.render(localcontext)
            path = sanitised_join(output_path, name)
@ -207,9 +228,9 @@ class Writer:
            except Exception:
                pass

            with self._open_w(path, "utf-8", override=override) as f:
                f.write(output)
            logger.info("Writing %s", path)

            # Send a signal to say we're writing a file with some specific
            # local context.
@ -217,54 +238,66 @@ class Writer:
        def _get_localcontext(context, name, kwargs, relative_urls):
            localcontext = context.copy()
            localcontext["localsiteurl"] = localcontext.get("localsiteurl", None)
            if relative_urls:
                relative_url = path_to_url(get_relative_path(name))
                localcontext["SITEURL"] = relative_url
                localcontext["localsiteurl"] = relative_url
            localcontext["output_file"] = name
            localcontext.update(kwargs)
            return localcontext

        if paginated is None:
            paginated = {
                key: val for key, val in kwargs.items() if key in {"articles", "dates"}
            }

        # pagination
        if paginated and template_name in self.settings["PAGINATED_TEMPLATES"]:
            # pagination needed
            per_page = (
                self.settings["PAGINATED_TEMPLATES"][template_name]
                or self.settings["DEFAULT_PAGINATION"]
            )

            # init paginators
            paginators = {
                key: Paginator(name, url, val, self.settings, per_page)
                for key, val in paginated.items()
            }

            # generated pages, and write
            for page_num in range(list(paginators.values())[0].num_pages):
                paginated_kwargs = kwargs.copy()
                for key in paginators.keys():
                    paginator = paginators[key]
                    previous_page = paginator.page(page_num) if page_num > 0 else None
                    page = paginator.page(page_num + 1)
                    next_page = (
                        paginator.page(page_num + 2)
                        if page_num + 1 < paginator.num_pages
                        else None
                    )
                    paginated_kwargs.update(
                        {
                            "%s_paginator" % key: paginator,
                            "%s_page" % key: page,
                            "%s_previous_page" % key: previous_page,
                            "%s_next_page" % key: next_page,
                        }
                    )

                localcontext = _get_localcontext(
                    context, page.save_as, paginated_kwargs, relative_urls
                )
                _write_file(
                    template,
                    localcontext,
                    self.output_path,
                    page.save_as,
                    override_output,
                )
        else:
            # no pagination
            localcontext = _get_localcontext(context, name, kwargs, relative_urls)
            _write_file(template, localcontext, self.output_path, name, override_output)
View file
@ -1,55 +1,59 @@
AUTHOR = "Alexis Métaireau"
SITENAME = "Alexis' log"
SITESUBTITLE = "A personal blog."
SITEURL = "http://blog.notmyidea.org"
TIMEZONE = "Europe/Paris"

# can be useful in development, but set to False when you're ready to publish
RELATIVE_URLS = True

GITHUB_URL = "http://github.com/ametaireau/"
DISQUS_SITENAME = "blog-notmyidea"
REVERSE_CATEGORY_ORDER = True
LOCALE = "C"
DEFAULT_PAGINATION = 4
DEFAULT_DATE = (2012, 3, 2, 14, 1, 1)

FEED_ALL_RSS = "feeds/all.rss.xml"
CATEGORY_FEED_RSS = "feeds/{slug}.rss.xml"

LINKS = (
    ("Biologeek", "http://biologeek.org"),
    ("Filyb", "http://filyb.info/"),
    ("Libert-fr", "http://www.libert-fr.com"),
    ("N1k0", "http://prendreuncafe.com/blog/"),
    ("Tarek Ziadé", "http://ziade.org/blog"),
    ("Zubin Mithra", "http://zubin71.wordpress.com/"),
)

SOCIAL = (
    ("twitter", "http://twitter.com/ametaireau"),
    ("lastfm", "http://lastfm.com/user/akounet"),
    ("github", "http://github.com/ametaireau"),
)

# global metadata to all the contents
DEFAULT_METADATA = {"yeah": "it is"}

# path-specific metadata
EXTRA_PATH_METADATA = {
    "extra/robots.txt": {"path": "robots.txt"},
}

# static paths will be copied without parsing their contents
STATIC_PATHS = [
    "images",
    "extra/robots.txt",
]

# custom page generated with a jinja2 template
TEMPLATE_PAGES = {"pages/jinja2_template.html": "jinja2_template.html"}

# there is no other HTML content
READERS = {"html": None}

# code blocks with line numbers
PYGMENTS_RST_OPTIONS = {"linenos": "table"}

# foobar will not be used, because it's not in caps. All configuration keys
# have to be in caps
View file
@ -1,56 +1,60 @@
AUTHOR = "Alexis Métaireau"
SITENAME = "Alexis' log"
SITEURL = "http://blog.notmyidea.org"
TIMEZONE = "Europe/Paris"

# can be useful in development, but set to False when you're ready to publish
RELATIVE_URLS = True

GITHUB_URL = "http://github.com/ametaireau/"
DISQUS_SITENAME = "blog-notmyidea"
PDF_GENERATOR = False
REVERSE_CATEGORY_ORDER = True
LOCALE = "fr_FR.UTF-8"
DEFAULT_PAGINATION = 4
DEFAULT_DATE = (2012, 3, 2, 14, 1, 1)
DEFAULT_DATE_FORMAT = "%d %B %Y"

ARTICLE_URL = "posts/{date:%Y}/{date:%B}/{date:%d}/{slug}/"
ARTICLE_SAVE_AS = ARTICLE_URL + "index.html"

FEED_ALL_RSS = "feeds/all.rss.xml"
CATEGORY_FEED_RSS = "feeds/{slug}.rss.xml"

LINKS = (
    ("Biologeek", "http://biologeek.org"),
    ("Filyb", "http://filyb.info/"),
    ("Libert-fr", "http://www.libert-fr.com"),
    ("N1k0", "http://prendreuncafe.com/blog/"),
    ("Tarek Ziadé", "http://ziade.org/blog"),
    ("Zubin Mithra", "http://zubin71.wordpress.com/"),
)

SOCIAL = (
    ("twitter", "http://twitter.com/ametaireau"),
    ("lastfm", "http://lastfm.com/user/akounet"),
    ("github", "http://github.com/ametaireau"),
)

# global metadata to all the contents
DEFAULT_METADATA = {"yeah": "it is"}

# path-specific metadata
EXTRA_PATH_METADATA = {
    "extra/robots.txt": {"path": "robots.txt"},
}

# static paths will be copied without parsing their contents
STATIC_PATHS = [
    "pictures",
    "extra/robots.txt",
]

# custom page generated with a jinja2 template
TEMPLATE_PAGES = {"pages/jinja2_template.html": "jinja2_template.html"}

# code blocks with line numbers
PYGMENTS_RST_OPTIONS = {"linenos": "table"}

# foobar will not be used, because it's not in caps. All configuration keys
# have to be in caps