Merge pull request #3231 from getpelican/enforce-code-style

Justin Mayer 2023-10-30 19:49:58 +01:00 committed by GitHub
commit a20cd8dda5
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
63 changed files with 6557 additions and 5231 deletions

@@ -1,3 +1,2 @@
[report]
omit = pelican/tests/*

@@ -63,6 +63,8 @@ jobs:
pdm install --no-default --dev
- name: Run linters
run: pdm lint --diff
- name: Run pre-commit checks on all files
uses: pre-commit/action@v3.0.0
build:
name: Test build
@@ -97,6 +99,11 @@ jobs:
run: python -m pip install -U pip tox
- name: Check
run: tox -e docs
- name: cache the docs for inspection
uses: actions/upload-artifact@v3
with:
name: docs
path: docs/_build/html/
deploy:
name: Deploy

@@ -1 +1 @@
<?xml version="1.0" encoding="UTF-8"?><svg id="svg33" width="64" height="64" style="clip-rule:evenodd;fill-rule:evenodd;stroke-linejoin:round;stroke-miterlimit:1.4142" version="1.1" viewBox="0 0 64 64" xml:space="preserve" xmlns="http://www.w3.org/2000/svg"><g id="g864" transform="matrix(.10228 0 0 .10228 2.441 6.0098e-5)"><g id="g4" transform="matrix(4.1667 0 0 4.1667 -301.27 -2392.2)"><path id="path2" d="m210.35 607.22c-.34-2.106-.842-4.303-1.491-6.591-1.537-5.441-4.918-10.074-9.506-13.854-1.205-1.001-2.503-1.946-3.88-2.823-5.293-3.381-11.692-5.851-18.522-7.32-4.588-.99-9.367-1.525-14.139-1.593-34.662-.774-56.234.387-90.373-.911.012.023.012.046.022.068 1.56 1.264 3.154 2.471 4.782 3.643 3.573 2.584 7.297 4.952 11.155 7.127 7.184 4.04 14.845 7.342 22.859 9.801.956.295 1.912.58 2.87.842 5.6.603 10.631 1.206 14.648 3.074 1.015.455 1.959 1.001 2.835 1.639 2.87 2.106 6.057 6.124 8.152 8.936 4.497 5.999 3.551 10.928 8.88 13.887.557.308 1.182.604 1.889.866 1.696.638 4.119 1.491 5.225-.91.16-.342.283-.764.387-1.264-.446-1.434-1.981-2.675-2.905-3.29-1.638-1.07-2.287-1.719-3.47-2.937-2.186-2.243-2.333-6.056-3.871-8.708 1.935-.82 12.146-2.186 14.287-1.89 4.576.204 8.185.557 10.939 3.392 1.08.854 1.672 1.594 2.652 2.334.069.057.125.114.194.159 4.338 3.153 8.343 4.28 11.894 5.362.936.284 1.822.558 2.69.876 1.332.478 2.582 1.048 3.754 1.81 1.39.922 3.748 3.336 3.849 5.419-3.496-1.116-1.185.296-6.342-.102-2.515-.285-5.087-.456-7.671-.638-4.018-.284-8.038-.581-11.805-1.297-.627-.115-1.254-.251-1.867-.399-.479-.102-.946-.227-1.401-.353-.011.193-.021.376-.021.546-.104 3.939 2.674 5.908-3.678 13.399-.057.08-.137.159-.205.25-1.686 1.97-10.449 5.715-13.182 6.432-11.634 2.334-20.502-5.237-34.515-1.423-4.929 1.833-8.549 9.824-10.815 15.8-3.016 7.936-5.406 17.576-8.139 27.06 5.329-.797 10.53-1.936 15.585-3.427 11.167-3.279 21.651-8.185 31.168-14.445.911-1.231 1.912-2.29 2.994-3.108.284-.217.58-.422.877-.603.215-.137.956-.286 2.127-.502 10.861-1.924 58.5-8.377 61.597-42.962.319-3.494.172-7.285-.513-11.372zm-106.94 18.59c-6.375-1.924-8.003-2.243-12.055-5.385.067.33.17.695.307 1.081 10.779 6.068 22.608 10.462 35.141 12.842-3.893-9.051-8.502-7.445-23.393-8.538zm29.518-4.099c-2.779-6.738-10.313-10.575-16.813-12.464-8.721-3.12-15.061-.125-33.458-8.811.147.239.284.467.432.694 3.575 2.584 7.297 4.963 11.157 7.126 7.184 4.041 14.844 7.343 22.857 9.802 4.167.489 8.175 1.184 11.863 2.96 1.639.773 3.21 1.764 4.702 3.039-.183-.82-.434-1.605-.74-2.346z" style="fill-rule:nonzero;fill:url(#_Linear1)"/></g><g id="g8" transform="matrix(4.1667 0 0 4.1667 -301.27 -2392.2)"><path id="path6" d="m114.13 595.61c-.958-.262-1.914-.547-2.87-.842-8.014-2.459-15.675-5.761-22.859-9.801-3.858-2.175-7.582-4.543-11.155-7.127-1.628-1.172-3.222-2.379-4.782-3.643 2.14 6.603 11.634 13.57 18.078 16.313 8.218 3.495 16.381 4.303 23.588 5.1z" style="fill:#90d4d1"/></g><g id="g12" transform="matrix(4.1667 0 0 4.1667 -301.27 -2392.2)"><path id="path10" d="m94.253 608.25c-3.86-2.163-7.582-4.542-11.157-7.126 10.006 15.823 22.575 15.584 34.014 16.928-8.013-2.459-15.673-5.761-22.857-9.802z" style="fill:#90d4d1"/></g><g id="g16" transform="matrix(4.1667 0 0 4.1667 -301.27 -2392.2)"><path id="path14" d="m126.81 634.34c-12.533-2.38-24.362-6.774-35.141-12.842 1.376 3.973 6.351 10.257 12.943 11.658 2.858 1.024 2.094.762 6.967.614 7.137-.364 10.552-.592 15.608 1.469-.126-.308-.251-.604-.377-.899z" style="fill:#90d4d1"/></g><g id="g20" transform="matrix(4.1667 0 0 4.1667 -301.27 -2392.2)"><path id="path18" d="m143.27 665.76c-.081.101-.159.204-.239.318-13.844 
14.093-31.179 24.69-50.59 30.393 1.492-4.132 2.824-8.468 4.076-12.839 5.329-.797 10.53-1.936 15.585-3.427 11.167-3.279 21.651-8.185 31.168-14.445z" style="fill:#90d4d1"/></g><g id="g24" transform="matrix(4.1667 0 0 4.1667 -301.27 -2392.2)"><path id="path22" d="m143.03 666.08c-6.046 8.287-9.118 24.122-12.659 33.274-5.144 13.342-12.294 22.95-27.958 24.317-3.928.351-27.582 1.24-30.11-.035.159-1.344 4.098-2.961 5.123-3.747 6.852-4.847 11.416-13.5 15.014-23.416 19.411-5.703 36.746-16.3 50.59-30.393z" style="fill:#14a0c4"/></g></g><defs id="defs31"><linearGradient id="_Linear1" x2="1" gradientTransform="matrix(138.58 0 0 138.58 72.442 628.88)" gradientUnits="userSpaceOnUse"><stop id="stop26" style="stop-color:rgb(84,196,198)" offset="0"/><stop id="stop28" style="stop-color:rgb(18,186,213)" offset="1"/></linearGradient></defs></svg>
<?xml version="1.0" encoding="UTF-8"?><svg id="svg33" width="64" height="64" style="clip-rule:evenodd;fill-rule:evenodd;stroke-linejoin:round;stroke-miterlimit:1.4142" version="1.1" viewBox="0 0 64 64" xml:space="preserve" xmlns="http://www.w3.org/2000/svg"><g id="g864" transform="matrix(.10228 0 0 .10228 2.441 6.0098e-5)"><g id="g4" transform="matrix(4.1667 0 0 4.1667 -301.27 -2392.2)"><path id="path2" d="m210.35 607.22c-.34-2.106-.842-4.303-1.491-6.591-1.537-5.441-4.918-10.074-9.506-13.854-1.205-1.001-2.503-1.946-3.88-2.823-5.293-3.381-11.692-5.851-18.522-7.32-4.588-.99-9.367-1.525-14.139-1.593-34.662-.774-56.234.387-90.373-.911.012.023.012.046.022.068 1.56 1.264 3.154 2.471 4.782 3.643 3.573 2.584 7.297 4.952 11.155 7.127 7.184 4.04 14.845 7.342 22.859 9.801.956.295 1.912.58 2.87.842 5.6.603 10.631 1.206 14.648 3.074 1.015.455 1.959 1.001 2.835 1.639 2.87 2.106 6.057 6.124 8.152 8.936 4.497 5.999 3.551 10.928 8.88 13.887.557.308 1.182.604 1.889.866 1.696.638 4.119 1.491 5.225-.91.16-.342.283-.764.387-1.264-.446-1.434-1.981-2.675-2.905-3.29-1.638-1.07-2.287-1.719-3.47-2.937-2.186-2.243-2.333-6.056-3.871-8.708 1.935-.82 12.146-2.186 14.287-1.89 4.576.204 8.185.557 10.939 3.392 1.08.854 1.672 1.594 2.652 2.334.069.057.125.114.194.159 4.338 3.153 8.343 4.28 11.894 5.362.936.284 1.822.558 2.69.876 1.332.478 2.582 1.048 3.754 1.81 1.39.922 3.748 3.336 3.849 5.419-3.496-1.116-1.185.296-6.342-.102-2.515-.285-5.087-.456-7.671-.638-4.018-.284-8.038-.581-11.805-1.297-.627-.115-1.254-.251-1.867-.399-.479-.102-.946-.227-1.401-.353-.011.193-.021.376-.021.546-.104 3.939 2.674 5.908-3.678 13.399-.057.08-.137.159-.205.25-1.686 1.97-10.449 5.715-13.182 6.432-11.634 2.334-20.502-5.237-34.515-1.423-4.929 1.833-8.549 9.824-10.815 15.8-3.016 7.936-5.406 17.576-8.139 27.06 5.329-.797 10.53-1.936 15.585-3.427 11.167-3.279 21.651-8.185 31.168-14.445.911-1.231 1.912-2.29 2.994-3.108.284-.217.58-.422.877-.603.215-.137.956-.286 2.127-.502 10.861-1.924 58.5-8.377 61.597-42.962.319-3.494.172-7.285-.513-11.372zm-106.94 18.59c-6.375-1.924-8.003-2.243-12.055-5.385.067.33.17.695.307 1.081 10.779 6.068 22.608 10.462 35.141 12.842-3.893-9.051-8.502-7.445-23.393-8.538zm29.518-4.099c-2.779-6.738-10.313-10.575-16.813-12.464-8.721-3.12-15.061-.125-33.458-8.811.147.239.284.467.432.694 3.575 2.584 7.297 4.963 11.157 7.126 7.184 4.041 14.844 7.343 22.857 9.802 4.167.489 8.175 1.184 11.863 2.96 1.639.773 3.21 1.764 4.702 3.039-.183-.82-.434-1.605-.74-2.346z" style="fill-rule:nonzero;fill:url(#_Linear1)"/></g><g id="g8" transform="matrix(4.1667 0 0 4.1667 -301.27 -2392.2)"><path id="path6" d="m114.13 595.61c-.958-.262-1.914-.547-2.87-.842-8.014-2.459-15.675-5.761-22.859-9.801-3.858-2.175-7.582-4.543-11.155-7.127-1.628-1.172-3.222-2.379-4.782-3.643 2.14 6.603 11.634 13.57 18.078 16.313 8.218 3.495 16.381 4.303 23.588 5.1z" style="fill:#90d4d1"/></g><g id="g12" transform="matrix(4.1667 0 0 4.1667 -301.27 -2392.2)"><path id="path10" d="m94.253 608.25c-3.86-2.163-7.582-4.542-11.157-7.126 10.006 15.823 22.575 15.584 34.014 16.928-8.013-2.459-15.673-5.761-22.857-9.802z" style="fill:#90d4d1"/></g><g id="g16" transform="matrix(4.1667 0 0 4.1667 -301.27 -2392.2)"><path id="path14" d="m126.81 634.34c-12.533-2.38-24.362-6.774-35.141-12.842 1.376 3.973 6.351 10.257 12.943 11.658 2.858 1.024 2.094.762 6.967.614 7.137-.364 10.552-.592 15.608 1.469-.126-.308-.251-.604-.377-.899z" style="fill:#90d4d1"/></g><g id="g20" transform="matrix(4.1667 0 0 4.1667 -301.27 -2392.2)"><path id="path18" d="m143.27 665.76c-.081.101-.159.204-.239.318-13.844 
14.093-31.179 24.69-50.59 30.393 1.492-4.132 2.824-8.468 4.076-12.839 5.329-.797 10.53-1.936 15.585-3.427 11.167-3.279 21.651-8.185 31.168-14.445z" style="fill:#90d4d1"/></g><g id="g24" transform="matrix(4.1667 0 0 4.1667 -301.27 -2392.2)"><path id="path22" d="m143.03 666.08c-6.046 8.287-9.118 24.122-12.659 33.274-5.144 13.342-12.294 22.95-27.958 24.317-3.928.351-27.582 1.24-30.11-.035.159-1.344 4.098-2.961 5.123-3.747 6.852-4.847 11.416-13.5 15.014-23.416 19.411-5.703 36.746-16.3 50.59-30.393z" style="fill:#14a0c4"/></g></g><defs id="defs31"><linearGradient id="_Linear1" x2="1" gradientTransform="matrix(138.58 0 0 138.58 72.442 628.88)" gradientUnits="userSpaceOnUse"><stop id="stop26" style="stop-color:rgb(84,196,198)" offset="0"/><stop id="stop28" style="stop-color:rgb(18,186,213)" offset="1"/></linearGradient></defs></svg>


@@ -9,4 +9,3 @@
.wy-table-responsive {
overflow: visible !important;
}

@@ -9,19 +9,25 @@ import sys
import time
import traceback
from collections.abc import Iterable
# Combines all paths to `pelican` package accessible from `sys.path`
# Makes it possible to install `pelican` and namespace plugins into different
# locations in the file system (e.g. pip with `-e` or `--user`)
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
# pelican.log has to be the first pelican module to be loaded
# because logging.setLoggerClass has to be called before logging.getLogger
from pelican.log import console
from pelican.log import init as init_logging
from pelican.generators import (ArticlesGenerator, # noqa: I100
PagesGenerator, SourceFileGenerator,
StaticGenerator, TemplatePagesGenerator)
from pelican.generators import (
ArticlesGenerator, # noqa: I100
PagesGenerator,
SourceFileGenerator,
StaticGenerator,
TemplatePagesGenerator,
)
from pelican.plugins import signals
from pelican.plugins._utils import get_plugin_name, load_plugins
from pelican.readers import Readers
@@ -35,12 +41,11 @@ try:
except Exception:
__version__ = "unknown"
DEFAULT_CONFIG_NAME = 'pelicanconf.py'
DEFAULT_CONFIG_NAME = "pelicanconf.py"
logger = logging.getLogger(__name__)
class Pelican:
def __init__(self, settings):
"""Pelican initialization
@@ -50,35 +55,34 @@ class Pelican:
# define the default settings
self.settings = settings
self.path = settings['PATH']
self.theme = settings['THEME']
self.output_path = settings['OUTPUT_PATH']
self.ignore_files = settings['IGNORE_FILES']
self.delete_outputdir = settings['DELETE_OUTPUT_DIRECTORY']
self.output_retention = settings['OUTPUT_RETENTION']
self.path = settings["PATH"]
self.theme = settings["THEME"]
self.output_path = settings["OUTPUT_PATH"]
self.ignore_files = settings["IGNORE_FILES"]
self.delete_outputdir = settings["DELETE_OUTPUT_DIRECTORY"]
self.output_retention = settings["OUTPUT_RETENTION"]
self.init_path()
self.init_plugins()
signals.initialized.send(self)
def init_path(self):
if not any(p in sys.path for p in ['', os.curdir]):
if not any(p in sys.path for p in ["", os.curdir]):
logger.debug("Adding current directory to system path")
sys.path.insert(0, '')
sys.path.insert(0, "")
def init_plugins(self):
self.plugins = []
for plugin in load_plugins(self.settings):
name = get_plugin_name(plugin)
logger.debug('Registering plugin `%s`', name)
logger.debug("Registering plugin `%s`", name)
try:
plugin.register()
self.plugins.append(plugin)
except Exception as e:
logger.error('Cannot register plugin `%s`\n%s',
name, e)
logger.error("Cannot register plugin `%s`\n%s", name, e)
self.settings['PLUGINS'] = [get_plugin_name(p) for p in self.plugins]
self.settings["PLUGINS"] = [get_plugin_name(p) for p in self.plugins]
def run(self):
"""Run the generators and return"""
@@ -87,10 +91,10 @@ class Pelican:
context = self.settings.copy()
# Share these among all the generators and content objects
# They map source paths to Content objects or None
context['generated_content'] = {}
context['static_links'] = set()
context['static_content'] = {}
context['localsiteurl'] = self.settings['SITEURL']
context["generated_content"] = {}
context["static_links"] = set()
context["static_content"] = {}
context["localsiteurl"] = self.settings["SITEURL"]
generators = [
cls(
@@ -99,23 +103,25 @@ class Pelican:
path=self.path,
theme=self.theme,
output_path=self.output_path,
) for cls in self._get_generator_classes()
)
for cls in self._get_generator_classes()
]
# Delete the output directory if (1) the appropriate setting is True
# and (2) that directory is not the parent of the source directory
if (self.delete_outputdir
and os.path.commonpath([os.path.realpath(self.output_path)]) !=
os.path.commonpath([os.path.realpath(self.output_path),
os.path.realpath(self.path)])):
if self.delete_outputdir and os.path.commonpath(
[os.path.realpath(self.output_path)]
) != os.path.commonpath(
[os.path.realpath(self.output_path), os.path.realpath(self.path)]
):
clean_output_dir(self.output_path, self.output_retention)
for p in generators:
if hasattr(p, 'generate_context'):
if hasattr(p, "generate_context"):
p.generate_context()
for p in generators:
if hasattr(p, 'refresh_metadata_intersite_links'):
if hasattr(p, "refresh_metadata_intersite_links"):
p.refresh_metadata_intersite_links()
signals.all_generators_finalized.send(generators)
@@ -123,61 +129,75 @@ class Pelican:
writer = self._get_writer()
for p in generators:
if hasattr(p, 'generate_output'):
if hasattr(p, "generate_output"):
p.generate_output(writer)
signals.finalized.send(self)
articles_generator = next(g for g in generators
if isinstance(g, ArticlesGenerator))
pages_generator = next(g for g in generators
if isinstance(g, PagesGenerator))
articles_generator = next(
g for g in generators if isinstance(g, ArticlesGenerator)
)
pages_generator = next(g for g in generators if isinstance(g, PagesGenerator))
pluralized_articles = maybe_pluralize(
(len(articles_generator.articles) +
len(articles_generator.translations)),
'article',
'articles')
(len(articles_generator.articles) + len(articles_generator.translations)),
"article",
"articles",
)
pluralized_drafts = maybe_pluralize(
(len(articles_generator.drafts) +
len(articles_generator.drafts_translations)),
'draft',
'drafts')
(
len(articles_generator.drafts)
+ len(articles_generator.drafts_translations)
),
"draft",
"drafts",
)
pluralized_hidden_articles = maybe_pluralize(
(len(articles_generator.hidden_articles) +
len(articles_generator.hidden_translations)),
'hidden article',
'hidden articles')
(
len(articles_generator.hidden_articles)
+ len(articles_generator.hidden_translations)
),
"hidden article",
"hidden articles",
)
pluralized_pages = maybe_pluralize(
(len(pages_generator.pages) +
len(pages_generator.translations)),
'page',
'pages')
(len(pages_generator.pages) + len(pages_generator.translations)),
"page",
"pages",
)
pluralized_hidden_pages = maybe_pluralize(
(len(pages_generator.hidden_pages) +
len(pages_generator.hidden_translations)),
'hidden page',
'hidden pages')
(
len(pages_generator.hidden_pages)
+ len(pages_generator.hidden_translations)
),
"hidden page",
"hidden pages",
)
pluralized_draft_pages = maybe_pluralize(
(len(pages_generator.draft_pages) +
len(pages_generator.draft_translations)),
'draft page',
'draft pages')
(
len(pages_generator.draft_pages)
+ len(pages_generator.draft_translations)
),
"draft page",
"draft pages",
)
console.print('Done: Processed {}, {}, {}, {}, {} and {} in {:.2f} seconds.'
.format(
pluralized_articles,
pluralized_drafts,
pluralized_hidden_articles,
pluralized_pages,
pluralized_hidden_pages,
pluralized_draft_pages,
time.time() - start_time))
console.print(
"Done: Processed {}, {}, {}, {}, {} and {} in {:.2f} seconds.".format(
pluralized_articles,
pluralized_drafts,
pluralized_hidden_articles,
pluralized_pages,
pluralized_hidden_pages,
pluralized_draft_pages,
time.time() - start_time,
)
)
def _get_generator_classes(self):
discovered_generators = [
(ArticlesGenerator, "internal"),
(PagesGenerator, "internal")
(PagesGenerator, "internal"),
]
if self.settings["TEMPLATE_PAGES"]:
@@ -236,7 +256,7 @@ class PrintSettings(argparse.Action):
except Exception as e:
logger.critical("%s: %s", e.__class__.__name__, e)
console.print_exception()
sys.exit(getattr(e, 'exitcode', 1))
sys.exit(getattr(e, "exitcode", 1))
if values:
# One or more arguments provided, so only print those settings
@@ -244,14 +264,16 @@ class PrintSettings(argparse.Action):
if setting in settings:
# Only add newline between setting name and value if dict
if isinstance(settings[setting], (dict, tuple, list)):
setting_format = '\n{}:\n{}'
setting_format = "\n{}:\n{}"
else:
setting_format = '\n{}: {}'
console.print(setting_format.format(
setting,
pprint.pformat(settings[setting])))
setting_format = "\n{}: {}"
console.print(
setting_format.format(
setting, pprint.pformat(settings[setting])
)
)
else:
console.print('\n{} is not a recognized setting.'.format(setting))
console.print("\n{} is not a recognized setting.".format(setting))
break
else:
# No argument was given to --print-settings, so print all settings
@@ -268,170 +290,258 @@ class ParseOverrides(argparse.Action):
k, v = item.split("=", 1)
except ValueError:
raise ValueError(
'Extra settings must be specified as KEY=VALUE pairs '
f'but you specified {item}'
"Extra settings must be specified as KEY=VALUE pairs "
f"but you specified {item}"
)
try:
overrides[k] = json.loads(v)
except json.decoder.JSONDecodeError:
raise ValueError(
f'Invalid JSON value: {v}. '
'Values specified via -e / --extra-settings flags '
'must be in JSON notation. '
'Use -e KEY=\'"string"\' to specify a string value; '
'-e KEY=null to specify None; '
'-e KEY=false (or true) to specify False (or True).'
f"Invalid JSON value: {v}. "
"Values specified via -e / --extra-settings flags "
"must be in JSON notation. "
"Use -e KEY='\"string\"' to specify a string value; "
"-e KEY=null to specify None; "
"-e KEY=false (or true) to specify False (or True)."
)
setattr(namespace, self.dest, overrides)
def parse_arguments(argv=None):
parser = argparse.ArgumentParser(
description='A tool to generate a static blog, '
' with restructured text input files.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
description="A tool to generate a static blog, "
" with restructured text input files.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(dest='path', nargs='?',
help='Path where to find the content files.',
default=None)
parser.add_argument(
dest="path",
nargs="?",
help="Path where to find the content files.",
default=None,
)
parser.add_argument('-t', '--theme-path', dest='theme',
help='Path where to find the theme templates. If not '
'specified, it will use the default one included with '
'pelican.')
parser.add_argument(
"-t",
"--theme-path",
dest="theme",
help="Path where to find the theme templates. If not "
"specified, it will use the default one included with "
"pelican.",
)
parser.add_argument('-o', '--output', dest='output',
help='Where to output the generated files. If not '
'specified, a directory will be created, named '
'"output" in the current path.')
parser.add_argument(
"-o",
"--output",
dest="output",
help="Where to output the generated files. If not "
"specified, a directory will be created, named "
'"output" in the current path.',
)
parser.add_argument('-s', '--settings', dest='settings',
help='The settings of the application, this is '
'automatically set to {} if a file exists with this '
'name.'.format(DEFAULT_CONFIG_NAME))
parser.add_argument(
"-s",
"--settings",
dest="settings",
help="The settings of the application, this is "
"automatically set to {} if a file exists with this "
"name.".format(DEFAULT_CONFIG_NAME),
)
parser.add_argument('-d', '--delete-output-directory',
dest='delete_outputdir', action='store_true',
default=None, help='Delete the output directory.')
parser.add_argument(
"-d",
"--delete-output-directory",
dest="delete_outputdir",
action="store_true",
default=None,
help="Delete the output directory.",
)
parser.add_argument('-v', '--verbose', action='store_const',
const=logging.INFO, dest='verbosity',
help='Show all messages.')
parser.add_argument(
"-v",
"--verbose",
action="store_const",
const=logging.INFO,
dest="verbosity",
help="Show all messages.",
)
parser.add_argument('-q', '--quiet', action='store_const',
const=logging.CRITICAL, dest='verbosity',
help='Show only critical errors.')
parser.add_argument(
"-q",
"--quiet",
action="store_const",
const=logging.CRITICAL,
dest="verbosity",
help="Show only critical errors.",
)
parser.add_argument('-D', '--debug', action='store_const',
const=logging.DEBUG, dest='verbosity',
help='Show all messages, including debug messages.')
parser.add_argument(
"-D",
"--debug",
action="store_const",
const=logging.DEBUG,
dest="verbosity",
help="Show all messages, including debug messages.",
)
parser.add_argument('--version', action='version', version=__version__,
help='Print the pelican version and exit.')
parser.add_argument(
"--version",
action="version",
version=__version__,
help="Print the pelican version and exit.",
)
parser.add_argument('-r', '--autoreload', dest='autoreload',
action='store_true',
help='Relaunch pelican each time a modification occurs'
' on the content files.')
parser.add_argument(
"-r",
"--autoreload",
dest="autoreload",
action="store_true",
help="Relaunch pelican each time a modification occurs"
" on the content files.",
)
parser.add_argument('--print-settings', dest='print_settings', nargs='*',
action=PrintSettings, metavar='SETTING_NAME',
help='Print current configuration settings and exit. '
'Append one or more setting name arguments to see the '
'values for specific settings only.')
parser.add_argument(
"--print-settings",
dest="print_settings",
nargs="*",
action=PrintSettings,
metavar="SETTING_NAME",
help="Print current configuration settings and exit. "
"Append one or more setting name arguments to see the "
"values for specific settings only.",
)
parser.add_argument('--relative-urls', dest='relative_paths',
action='store_true',
help='Use relative urls in output, '
'useful for site development')
parser.add_argument(
"--relative-urls",
dest="relative_paths",
action="store_true",
help="Use relative urls in output, " "useful for site development",
)
parser.add_argument('--cache-path', dest='cache_path',
help=('Directory in which to store cache files. '
'If not specified, defaults to "cache".'))
parser.add_argument(
"--cache-path",
dest="cache_path",
help=(
"Directory in which to store cache files. "
'If not specified, defaults to "cache".'
),
)
parser.add_argument('--ignore-cache', action='store_true',
dest='ignore_cache', help='Ignore content cache '
'from previous runs by not loading cache files.')
parser.add_argument(
"--ignore-cache",
action="store_true",
dest="ignore_cache",
help="Ignore content cache " "from previous runs by not loading cache files.",
)
parser.add_argument('-w', '--write-selected', type=str,
dest='selected_paths', default=None,
help='Comma separated list of selected paths to write')
parser.add_argument(
"-w",
"--write-selected",
type=str,
dest="selected_paths",
default=None,
help="Comma separated list of selected paths to write",
)
parser.add_argument('--fatal', metavar='errors|warnings',
choices=('errors', 'warnings'), default='',
help=('Exit the program with non-zero status if any '
'errors/warnings encountered.'))
parser.add_argument(
"--fatal",
metavar="errors|warnings",
choices=("errors", "warnings"),
default="",
help=(
"Exit the program with non-zero status if any "
"errors/warnings encountered."
),
)
parser.add_argument('--logs-dedup-min-level', default='WARNING',
choices=('DEBUG', 'INFO', 'WARNING', 'ERROR'),
help=('Only enable log de-duplication for levels equal'
' to or above the specified value'))
parser.add_argument(
"--logs-dedup-min-level",
default="WARNING",
choices=("DEBUG", "INFO", "WARNING", "ERROR"),
help=(
"Only enable log de-duplication for levels equal"
" to or above the specified value"
),
)
parser.add_argument('-l', '--listen', dest='listen', action='store_true',
help='Serve content files via HTTP and port 8000.')
parser.add_argument(
"-l",
"--listen",
dest="listen",
action="store_true",
help="Serve content files via HTTP and port 8000.",
)
parser.add_argument('-p', '--port', dest='port', type=int,
help='Port to serve HTTP files at. (default: 8000)')
parser.add_argument(
"-p",
"--port",
dest="port",
type=int,
help="Port to serve HTTP files at. (default: 8000)",
)
parser.add_argument('-b', '--bind', dest='bind',
help='IP to bind to when serving files via HTTP '
'(default: 127.0.0.1)')
parser.add_argument(
"-b",
"--bind",
dest="bind",
help="IP to bind to when serving files via HTTP " "(default: 127.0.0.1)",
)
parser.add_argument('-e', '--extra-settings', dest='overrides',
help='Specify one or more SETTING=VALUE pairs to '
'override settings. VALUE must be in JSON notation: '
'specify string values as SETTING=\'"some string"\'; '
'booleans as SETTING=true or SETTING=false; '
'None as SETTING=null.',
nargs='*',
action=ParseOverrides,
default={})
parser.add_argument(
"-e",
"--extra-settings",
dest="overrides",
help="Specify one or more SETTING=VALUE pairs to "
"override settings. VALUE must be in JSON notation: "
"specify string values as SETTING='\"some string\"'; "
"booleans as SETTING=true or SETTING=false; "
"None as SETTING=null.",
nargs="*",
action=ParseOverrides,
default={},
)
args = parser.parse_args(argv)
if args.port is not None and not args.listen:
logger.warning('--port without --listen has no effect')
logger.warning("--port without --listen has no effect")
if args.bind is not None and not args.listen:
logger.warning('--bind without --listen has no effect')
logger.warning("--bind without --listen has no effect")
return args
def get_config(args):
"""Builds a config dictionary based on supplied `args`.
"""
"""Builds a config dictionary based on supplied `args`."""
config = {}
if args.path:
config['PATH'] = os.path.abspath(os.path.expanduser(args.path))
config["PATH"] = os.path.abspath(os.path.expanduser(args.path))
if args.output:
config['OUTPUT_PATH'] = \
os.path.abspath(os.path.expanduser(args.output))
config["OUTPUT_PATH"] = os.path.abspath(os.path.expanduser(args.output))
if args.theme:
abstheme = os.path.abspath(os.path.expanduser(args.theme))
config['THEME'] = abstheme if os.path.exists(abstheme) else args.theme
config["THEME"] = abstheme if os.path.exists(abstheme) else args.theme
if args.delete_outputdir is not None:
config['DELETE_OUTPUT_DIRECTORY'] = args.delete_outputdir
config["DELETE_OUTPUT_DIRECTORY"] = args.delete_outputdir
if args.ignore_cache:
config['LOAD_CONTENT_CACHE'] = False
config["LOAD_CONTENT_CACHE"] = False
if args.cache_path:
config['CACHE_PATH'] = args.cache_path
config["CACHE_PATH"] = args.cache_path
if args.selected_paths:
config['WRITE_SELECTED'] = args.selected_paths.split(',')
config["WRITE_SELECTED"] = args.selected_paths.split(",")
if args.relative_paths:
config['RELATIVE_URLS'] = args.relative_paths
config["RELATIVE_URLS"] = args.relative_paths
if args.port is not None:
config['PORT'] = args.port
config["PORT"] = args.port
if args.bind is not None:
config['BIND'] = args.bind
config['DEBUG'] = args.verbosity == logging.DEBUG
config["BIND"] = args.bind
config["DEBUG"] = args.verbosity == logging.DEBUG
config.update(args.overrides)
return config
def get_instance(args):
config_file = args.settings
if config_file is None and os.path.isfile(DEFAULT_CONFIG_NAME):
config_file = DEFAULT_CONFIG_NAME
@@ -439,9 +549,9 @@ def get_instance(args):
settings = read_settings(config_file, override=get_config(args))
cls = settings['PELICAN_CLASS']
cls = settings["PELICAN_CLASS"]
if isinstance(cls, str):
module, cls_name = cls.rsplit('.', 1)
module, cls_name = cls.rsplit(".", 1)
module = __import__(module)
cls = getattr(module, cls_name)
@@ -449,8 +559,10 @@ def get_instance(args):
def autoreload(args, excqueue=None):
console.print(' --- AutoReload Mode: Monitoring `content`, `theme` and'
' `settings` for changes. ---')
console.print(
" --- AutoReload Mode: Monitoring `content`, `theme` and"
" `settings` for changes. ---"
)
pelican, settings = get_instance(args)
settings_file = os.path.abspath(args.settings)
while True:
@@ -463,8 +575,9 @@ def autoreload(args, excqueue=None):
if settings_file in changed_files:
pelican, settings = get_instance(args)
console.print('\n-> Modified: {}. re-generating...'.format(
', '.join(changed_files)))
console.print(
"\n-> Modified: {}. re-generating...".format(", ".join(changed_files))
)
except KeyboardInterrupt:
if excqueue is not None:
@@ -473,15 +586,14 @@ def autoreload(args, excqueue=None):
raise
except Exception as e:
if (args.verbosity == logging.DEBUG):
if args.verbosity == logging.DEBUG:
if excqueue is not None:
excqueue.put(
traceback.format_exception_only(type(e), e)[-1])
excqueue.put(traceback.format_exception_only(type(e), e)[-1])
else:
raise
logger.warning(
'Caught exception:\n"%s".', e,
exc_info=settings.get('DEBUG', False))
'Caught exception:\n"%s".', e, exc_info=settings.get("DEBUG", False)
)
def listen(server, port, output, excqueue=None):
@@ -491,8 +603,7 @@ def listen(server, port, output, excqueue=None):
RootedHTTPServer.allow_reuse_address = True
try:
httpd = RootedHTTPServer(
output, (server, port), ComplexHTTPRequestHandler)
httpd = RootedHTTPServer(output, (server, port), ComplexHTTPRequestHandler)
except OSError as e:
logging.error("Could not listen on port %s, server %s.", port, server)
if excqueue is not None:
@@ -500,8 +611,9 @@ def listen(server, port, output, excqueue=None):
return
try:
console.print("Serving site at: http://{}:{} - Tap CTRL-C to stop".format(
server, port))
console.print(
"Serving site at: http://{}:{} - Tap CTRL-C to stop".format(server, port)
)
httpd.serve_forever()
except Exception as e:
if excqueue is not None:
@@ -518,24 +630,31 @@ def listen(server, port, output, excqueue=None):
def main(argv=None):
args = parse_arguments(argv)
logs_dedup_min_level = getattr(logging, args.logs_dedup_min_level)
init_logging(level=args.verbosity, fatal=args.fatal,
name=__name__, logs_dedup_min_level=logs_dedup_min_level)
init_logging(
level=args.verbosity,
fatal=args.fatal,
name=__name__,
logs_dedup_min_level=logs_dedup_min_level,
)
logger.debug('Pelican version: %s', __version__)
logger.debug('Python version: %s', sys.version.split()[0])
logger.debug("Pelican version: %s", __version__)
logger.debug("Python version: %s", sys.version.split()[0])
try:
pelican, settings = get_instance(args)
if args.autoreload and args.listen:
excqueue = multiprocessing.Queue()
p1 = multiprocessing.Process(
target=autoreload,
args=(args, excqueue))
p1 = multiprocessing.Process(target=autoreload, args=(args, excqueue))
p2 = multiprocessing.Process(
target=listen,
args=(settings.get('BIND'), settings.get('PORT'),
settings.get("OUTPUT_PATH"), excqueue))
args=(
settings.get("BIND"),
settings.get("PORT"),
settings.get("OUTPUT_PATH"),
excqueue,
),
)
try:
p1.start()
p2.start()
@@ -548,16 +667,17 @@ def main(argv=None):
elif args.autoreload:
autoreload(args)
elif args.listen:
listen(settings.get('BIND'), settings.get('PORT'),
settings.get("OUTPUT_PATH"))
listen(
settings.get("BIND"), settings.get("PORT"), settings.get("OUTPUT_PATH")
)
else:
with console.status("Generating..."):
pelican.run()
except KeyboardInterrupt:
logger.warning('Keyboard interrupt received. Exiting.')
logger.warning("Keyboard interrupt received. Exiting.")
except Exception as e:
logger.critical("%s: %s", e.__class__.__name__, e)
if args.verbosity == logging.DEBUG:
console.print_exception()
sys.exit(getattr(e, 'exitcode', 1))
sys.exit(getattr(e, "exitcode", 1))

@@ -5,5 +5,5 @@ python -m pelican module entry point to run via python -m
from . import main
if __name__ == '__main__':
if __name__ == "__main__":
main()

@@ -19,29 +19,35 @@ class FileDataCacher:
Sets caching policy according to *caching_policy*.
"""
self.settings = settings
self._cache_path = os.path.join(self.settings['CACHE_PATH'],
cache_name)
self._cache_path = os.path.join(self.settings["CACHE_PATH"], cache_name)
self._cache_data_policy = caching_policy
if self.settings['GZIP_CACHE']:
if self.settings["GZIP_CACHE"]:
import gzip
self._cache_open = gzip.open
else:
self._cache_open = open
if load_policy:
try:
with self._cache_open(self._cache_path, 'rb') as fhandle:
with self._cache_open(self._cache_path, "rb") as fhandle:
self._cache = pickle.load(fhandle)
except (OSError, UnicodeDecodeError) as err:
logger.debug('Cannot load cache %s (this is normal on first '
'run). Proceeding with empty cache.\n%s',
self._cache_path, err)
logger.debug(
"Cannot load cache %s (this is normal on first "
"run). Proceeding with empty cache.\n%s",
self._cache_path,
err,
)
self._cache = {}
except pickle.PickleError as err:
logger.warning('Cannot unpickle cache %s, cache may be using '
'an incompatible protocol (see pelican '
'caching docs). '
'Proceeding with empty cache.\n%s',
self._cache_path, err)
logger.warning(
"Cannot unpickle cache %s, cache may be using "
"an incompatible protocol (see pelican "
"caching docs). "
"Proceeding with empty cache.\n%s",
self._cache_path,
err,
)
self._cache = {}
else:
self._cache = {}
@@ -62,12 +68,13 @@ class FileDataCacher:
"""Save the updated cache"""
if self._cache_data_policy:
try:
mkdir_p(self.settings['CACHE_PATH'])
with self._cache_open(self._cache_path, 'wb') as fhandle:
mkdir_p(self.settings["CACHE_PATH"])
with self._cache_open(self._cache_path, "wb") as fhandle:
pickle.dump(self._cache, fhandle)
except (OSError, pickle.PicklingError, TypeError) as err:
logger.warning('Could not save cache %s\n ... %s',
self._cache_path, err)
logger.warning(
"Could not save cache %s\n ... %s", self._cache_path, err
)
class FileStampDataCacher(FileDataCacher):
@@ -80,8 +87,8 @@ class FileStampDataCacher(FileDataCacher):
super().__init__(settings, cache_name, caching_policy, load_policy)
method = self.settings['CHECK_MODIFIED_METHOD']
if method == 'mtime':
method = self.settings["CHECK_MODIFIED_METHOD"]
if method == "mtime":
self._filestamp_func = os.path.getmtime
else:
try:
@@ -89,12 +96,12 @@ class FileStampDataCacher(FileDataCacher):
def filestamp_func(filename):
"""return hash of file contents"""
with open(filename, 'rb') as fhandle:
with open(filename, "rb") as fhandle:
return hash_func(fhandle.read()).digest()
self._filestamp_func = filestamp_func
except AttributeError as err:
logger.warning('Could not get hashing function\n\t%s', err)
logger.warning("Could not get hashing function\n\t%s", err)
self._filestamp_func = None
def cache_data(self, filename, data):
@@ -115,9 +122,8 @@ class FileStampDataCacher(FileDataCacher):
try:
return self._filestamp_func(filename)
except (OSError, TypeError) as err:
logger.warning('Cannot get modification stamp for %s\n\t%s',
filename, err)
return ''
logger.warning("Cannot get modification stamp for %s\n\t%s", filename, err)
return ""
def get_cached_data(self, filename, default=None):
"""Get the cached data for the given filename

@@ -16,12 +16,19 @@ except ModuleNotFoundError:
from pelican.plugins import signals
from pelican.settings import DEFAULT_CONFIG
from pelican.utils import (deprecated_attribute, memoized, path_to_url,
posixize_path, sanitised_join, set_date_tzinfo,
slugify, truncate_html_words)
from pelican.utils import (
deprecated_attribute,
memoized,
path_to_url,
posixize_path,
sanitised_join,
set_date_tzinfo,
slugify,
truncate_html_words,
)
# Import these so that they're available when you import from pelican.contents.
from pelican.urlwrappers import (Author, Category, Tag, URLWrapper) # NOQA
from pelican.urlwrappers import Author, Category, Tag, URLWrapper # NOQA
logger = logging.getLogger(__name__)
@@ -36,12 +43,14 @@ class Content:
:param context: The shared context between generators.
"""
@deprecated_attribute(old='filename', new='source_path', since=(3, 2, 0))
@deprecated_attribute(old="filename", new="source_path", since=(3, 2, 0))
def filename():
return None
def __init__(self, content, metadata=None, settings=None,
source_path=None, context=None):
def __init__(
self, content, metadata=None, settings=None, source_path=None, context=None
):
if metadata is None:
metadata = {}
if settings is None:
@@ -59,8 +68,8 @@ class Content:
# set metadata as attributes
for key, value in local_metadata.items():
if key in ('save_as', 'url'):
key = 'override_' + key
if key in ("save_as", "url"):
key = "override_" + key
setattr(self, key.lower(), value)
# also keep track of the metadata attributes available
@@ -71,53 +80,52 @@ class Content:
# First, read the authors from "authors", if not, fallback to "author"
# and if not use the settings defined one, if any.
if not hasattr(self, 'author'):
if hasattr(self, 'authors'):
if not hasattr(self, "author"):
if hasattr(self, "authors"):
self.author = self.authors[0]
elif 'AUTHOR' in settings:
self.author = Author(settings['AUTHOR'], settings)
elif "AUTHOR" in settings:
self.author = Author(settings["AUTHOR"], settings)
if not hasattr(self, 'authors') and hasattr(self, 'author'):
if not hasattr(self, "authors") and hasattr(self, "author"):
self.authors = [self.author]
# XXX Split all the following code into pieces, there is too much here.
# manage languages
self.in_default_lang = True
if 'DEFAULT_LANG' in settings:
default_lang = settings['DEFAULT_LANG'].lower()
if not hasattr(self, 'lang'):
if "DEFAULT_LANG" in settings:
default_lang = settings["DEFAULT_LANG"].lower()
if not hasattr(self, "lang"):
self.lang = default_lang
self.in_default_lang = (self.lang == default_lang)
self.in_default_lang = self.lang == default_lang
# create the slug if not existing, generate slug according to
# setting of SLUG_ATTRIBUTE
if not hasattr(self, 'slug'):
if (settings['SLUGIFY_SOURCE'] == 'title' and
hasattr(self, 'title')):
if not hasattr(self, "slug"):
if settings["SLUGIFY_SOURCE"] == "title" and hasattr(self, "title"):
value = self.title
elif (settings['SLUGIFY_SOURCE'] == 'basename' and
source_path is not None):
elif settings["SLUGIFY_SOURCE"] == "basename" and source_path is not None:
value = os.path.basename(os.path.splitext(source_path)[0])
else:
value = None
if value is not None:
self.slug = slugify(
value,
regex_subs=settings.get('SLUG_REGEX_SUBSTITUTIONS', []),
preserve_case=settings.get('SLUGIFY_PRESERVE_CASE', False),
use_unicode=settings.get('SLUGIFY_USE_UNICODE', False))
regex_subs=settings.get("SLUG_REGEX_SUBSTITUTIONS", []),
preserve_case=settings.get("SLUGIFY_PRESERVE_CASE", False),
use_unicode=settings.get("SLUGIFY_USE_UNICODE", False),
)
self.source_path = source_path
self.relative_source_path = self.get_relative_source_path()
# manage the date format
if not hasattr(self, 'date_format'):
if hasattr(self, 'lang') and self.lang in settings['DATE_FORMATS']:
self.date_format = settings['DATE_FORMATS'][self.lang]
if not hasattr(self, "date_format"):
if hasattr(self, "lang") and self.lang in settings["DATE_FORMATS"]:
self.date_format = settings["DATE_FORMATS"][self.lang]
else:
self.date_format = settings['DEFAULT_DATE_FORMAT']
self.date_format = settings["DEFAULT_DATE_FORMAT"]
if isinstance(self.date_format, tuple):
locale_string = self.date_format[0]
@@ -129,22 +137,22 @@ class Content:
timezone = getattr(self, "timezone", default_timezone)
self.timezone = ZoneInfo(timezone)
if hasattr(self, 'date'):
if hasattr(self, "date"):
self.date = set_date_tzinfo(self.date, timezone)
self.locale_date = self.date.strftime(self.date_format)
if hasattr(self, 'modified'):
if hasattr(self, "modified"):
self.modified = set_date_tzinfo(self.modified, timezone)
self.locale_modified = self.modified.strftime(self.date_format)
# manage status
if not hasattr(self, 'status'):
if not hasattr(self, "status"):
# Previous default of None broke comment plugins and perhaps others
self.status = getattr(self, 'default_status', '')
self.status = getattr(self, "default_status", "")
# store the summary metadata if it is set
if 'summary' in metadata:
self._summary = metadata['summary']
if "summary" in metadata:
self._summary = metadata["summary"]
signals.content_object_init.send(self)
@@ -156,8 +164,8 @@ class Content:
for prop in self.mandatory_properties:
if not hasattr(self, prop):
logger.error(
"Skipping %s: could not find information about '%s'",
self, prop)
"Skipping %s: could not find information about '%s'", self, prop
)
return False
return True
@@ -183,12 +191,13 @@ class Content:
return True
def _has_valid_status(self):
if hasattr(self, 'allowed_statuses'):
if hasattr(self, "allowed_statuses"):
if self.status not in self.allowed_statuses:
logger.error(
"Unknown status '%s' for file %s, skipping it. (Not in %s)",
self.status,
self, self.allowed_statuses
self,
self.allowed_statuses,
)
return False
@@ -198,42 +207,48 @@ class Content:
def is_valid(self):
"""Validate Content"""
# Use all() to not short circuit and get results of all validations
return all([self._has_valid_mandatory_properties(),
self._has_valid_save_as(),
self._has_valid_status()])
return all(
[
self._has_valid_mandatory_properties(),
self._has_valid_save_as(),
self._has_valid_status(),
]
)
@property
def url_format(self):
"""Returns the URL, formatted with the proper values"""
metadata = copy.copy(self.metadata)
path = self.metadata.get('path', self.get_relative_source_path())
metadata.update({
'path': path_to_url(path),
'slug': getattr(self, 'slug', ''),
'lang': getattr(self, 'lang', 'en'),
'date': getattr(self, 'date', datetime.datetime.now()),
'author': self.author.slug if hasattr(self, 'author') else '',
'category': self.category.slug if hasattr(self, 'category') else ''
})
path = self.metadata.get("path", self.get_relative_source_path())
metadata.update(
{
"path": path_to_url(path),
"slug": getattr(self, "slug", ""),
"lang": getattr(self, "lang", "en"),
"date": getattr(self, "date", datetime.datetime.now()),
"author": self.author.slug if hasattr(self, "author") else "",
"category": self.category.slug if hasattr(self, "category") else "",
}
)
return metadata
def _expand_settings(self, key, klass=None):
if not klass:
klass = self.__class__.__name__
fq_key = ('{}_{}'.format(klass, key)).upper()
fq_key = ("{}_{}".format(klass, key)).upper()
return str(self.settings[fq_key]).format(**self.url_format)
def get_url_setting(self, key):
if hasattr(self, 'override_' + key):
return getattr(self, 'override_' + key)
key = key if self.in_default_lang else 'lang_%s' % key
if hasattr(self, "override_" + key):
return getattr(self, "override_" + key)
key = key if self.in_default_lang else "lang_%s" % key
return self._expand_settings(key)
def _link_replacer(self, siteurl, m):
what = m.group('what')
value = urlparse(m.group('value'))
what = m.group("what")
value = urlparse(m.group("value"))
path = value.path
origin = m.group('path')
origin = m.group("path")
# urllib.parse.urljoin() produces `a.html` for urljoin("..", "a.html")
# so if RELATIVE_URLS are enabled, we fall back to os.path.join() to
@@ -241,7 +256,7 @@ class Content:
# `baz/http://foo/bar.html` for join("baz", "http://foo/bar.html")
# instead of correct "http://foo/bar.html", so one has to pick a side
# as there is no silver bullet.
if self.settings['RELATIVE_URLS']:
if self.settings["RELATIVE_URLS"]:
joiner = os.path.join
else:
joiner = urljoin
@@ -251,16 +266,17 @@ class Content:
# os.path.join()), so in order to get a correct answer one needs to
# append a trailing slash to siteurl in that case. This also makes
# the new behavior fully compatible with Pelican 3.7.1.
if not siteurl.endswith('/'):
siteurl += '/'
if not siteurl.endswith("/"):
siteurl += "/"
# XXX Put this in a different location.
if what in {'filename', 'static', 'attach'}:
if what in {"filename", "static", "attach"}:
def _get_linked_content(key, url):
nonlocal value
def _find_path(path):
if path.startswith('/'):
if path.startswith("/"):
path = path[1:]
else:
# relative to the source path of this content
@@ -287,59 +303,64 @@ class Content:
return result
# check if a static file is linked with {filename}
if what == 'filename' and key == 'generated_content':
linked_content = _get_linked_content('static_content', value)
if what == "filename" and key == "generated_content":
linked_content = _get_linked_content("static_content", value)
if linked_content:
logger.warning(
'{filename} used for linking to static'
' content %s in %s. Use {static} instead',
"{filename} used for linking to static"
" content %s in %s. Use {static} instead",
value.path,
self.get_relative_source_path())
self.get_relative_source_path(),
)
return linked_content
return None
if what == 'filename':
key = 'generated_content'
if what == "filename":
key = "generated_content"
else:
key = 'static_content'
key = "static_content"
linked_content = _get_linked_content(key, value)
if linked_content:
if what == 'attach':
if what == "attach":
linked_content.attach_to(self)
origin = joiner(siteurl, linked_content.url)
origin = origin.replace('\\', '/') # for Windows paths.
origin = origin.replace("\\", "/") # for Windows paths.
else:
logger.warning(
"Unable to find '%s', skipping url replacement.",
value.geturl(), extra={
'limit_msg': ("Other resources were not found "
"and their urls not replaced")})
elif what == 'category':
value.geturl(),
extra={
"limit_msg": (
"Other resources were not found "
"and their urls not replaced"
)
},
)
elif what == "category":
origin = joiner(siteurl, Category(path, self.settings).url)
elif what == 'tag':
elif what == "tag":
origin = joiner(siteurl, Tag(path, self.settings).url)
elif what == 'index':
origin = joiner(siteurl, self.settings['INDEX_SAVE_AS'])
elif what == 'author':
elif what == "index":
origin = joiner(siteurl, self.settings["INDEX_SAVE_AS"])
elif what == "author":
origin = joiner(siteurl, Author(path, self.settings).url)
else:
logger.warning(
"Replacement Indicator '%s' not recognized, "
"skipping replacement",
what)
"Replacement Indicator '%s' not recognized, " "skipping replacement",
what,
)
# keep all other parts, such as query, fragment, etc.
parts = list(value)
parts[2] = origin
origin = urlunparse(parts)
return ''.join((m.group('markup'), m.group('quote'), origin,
m.group('quote')))
return "".join((m.group("markup"), m.group("quote"), origin, m.group("quote")))
def _get_intrasite_link_regex(self):
intrasite_link_regex = self.settings['INTRASITE_LINK_REGEX']
intrasite_link_regex = self.settings["INTRASITE_LINK_REGEX"]
regex = r"""
(?P<markup><[^\>]+ # match tag with all url-value attributes
(?:href|src|poster|data|cite|formaction|action|content)\s*=\s*)
@@ -369,28 +390,28 @@ class Content:
static_links = set()
hrefs = self._get_intrasite_link_regex()
for m in hrefs.finditer(self._content):
what = m.group('what')
value = urlparse(m.group('value'))
what = m.group("what")
value = urlparse(m.group("value"))
path = value.path
if what not in {'static', 'attach'}:
if what not in {"static", "attach"}:
continue
if path.startswith('/'):
if path.startswith("/"):
path = path[1:]
else:
# relative to the source path of this content
path = self.get_relative_source_path(
os.path.join(self.relative_dir, path)
)
path = path.replace('%20', ' ')
path = path.replace("%20", " ")
static_links.add(path)
return static_links
def get_siteurl(self):
return self._context.get('localsiteurl', '')
return self._context.get("localsiteurl", "")
@memoized
def get_content(self, siteurl):
if hasattr(self, '_get_content'):
if hasattr(self, "_get_content"):
content = self._get_content()
else:
content = self._content
@@ -407,15 +428,17 @@ class Content:
This is based on the summary metadata if set, otherwise truncate the
content.
"""
if 'summary' in self.metadata:
return self.metadata['summary']
if "summary" in self.metadata:
return self.metadata["summary"]
if self.settings['SUMMARY_MAX_LENGTH'] is None:
if self.settings["SUMMARY_MAX_LENGTH"] is None:
return self.content
return truncate_html_words(self.content,
self.settings['SUMMARY_MAX_LENGTH'],
self.settings['SUMMARY_END_SUFFIX'])
return truncate_html_words(
self.content,
self.settings["SUMMARY_MAX_LENGTH"],
self.settings["SUMMARY_END_SUFFIX"],
)
@property
def summary(self):
@@ -424,8 +447,10 @@ class Content:
def _get_summary(self):
"""deprecated function to access summary"""
logger.warning('_get_summary() has been deprecated since 3.6.4. '
'Use the summary decorator instead')
logger.warning(
"_get_summary() has been deprecated since 3.6.4. "
"Use the summary decorator instead"
)
return self.summary
@summary.setter
@@ -444,14 +469,14 @@ class Content:
@property
def url(self):
return self.get_url_setting('url')
return self.get_url_setting("url")
@property
def save_as(self):
return self.get_url_setting('save_as')
return self.get_url_setting("save_as")
def _get_template(self):
if hasattr(self, 'template') and self.template is not None:
if hasattr(self, "template") and self.template is not None:
return self.template
else:
return self.default_template
@@ -470,11 +495,10 @@ class Content:
return posixize_path(
os.path.relpath(
os.path.abspath(os.path.join(
self.settings['PATH'],
source_path)),
os.path.abspath(self.settings['PATH'])
))
os.path.abspath(os.path.join(self.settings["PATH"], source_path)),
os.path.abspath(self.settings["PATH"]),
)
)
@property
def relative_dir(self):
@@ -482,85 +506,84 @@ class Content:
os.path.dirname(
os.path.relpath(
os.path.abspath(self.source_path),
os.path.abspath(self.settings['PATH']))))
os.path.abspath(self.settings["PATH"]),
)
)
)
def refresh_metadata_intersite_links(self):
for key in self.settings['FORMATTED_FIELDS']:
if key in self.metadata and key != 'summary':
value = self._update_content(
self.metadata[key],
self.get_siteurl()
)
for key in self.settings["FORMATTED_FIELDS"]:
if key in self.metadata and key != "summary":
value = self._update_content(self.metadata[key], self.get_siteurl())
self.metadata[key] = value
setattr(self, key.lower(), value)
# _summary is an internal variable that some plugins may be writing to,
# so ensure changes to it are picked up
if ('summary' in self.settings['FORMATTED_FIELDS'] and
'summary' in self.metadata):
self._summary = self._update_content(
self._summary,
self.get_siteurl()
)
self.metadata['summary'] = self._summary
if (
"summary" in self.settings["FORMATTED_FIELDS"]
and "summary" in self.metadata
):
self._summary = self._update_content(self._summary, self.get_siteurl())
self.metadata["summary"] = self._summary
class Page(Content):
mandatory_properties = ('title',)
allowed_statuses = ('published', 'hidden', 'draft')
default_status = 'published'
default_template = 'page'
mandatory_properties = ("title",)
allowed_statuses = ("published", "hidden", "draft")
default_status = "published"
default_template = "page"
def _expand_settings(self, key):
klass = 'draft_page' if self.status == 'draft' else None
klass = "draft_page" if self.status == "draft" else None
return super()._expand_settings(key, klass)
class Article(Content):
mandatory_properties = ('title', 'date', 'category')
allowed_statuses = ('published', 'hidden', 'draft')
default_status = 'published'
default_template = 'article'
mandatory_properties = ("title", "date", "category")
allowed_statuses = ("published", "hidden", "draft")
default_status = "published"
default_template = "article"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# handle WITH_FUTURE_DATES (designate article to draft based on date)
if not self.settings['WITH_FUTURE_DATES'] and hasattr(self, 'date'):
if not self.settings["WITH_FUTURE_DATES"] and hasattr(self, "date"):
if self.date.tzinfo is None:
now = datetime.datetime.now()
else:
now = datetime.datetime.utcnow().replace(tzinfo=timezone.utc)
if self.date > now:
self.status = 'draft'
self.status = "draft"
# if we are a draft and there is no date provided, set max datetime
if not hasattr(self, 'date') and self.status == 'draft':
if not hasattr(self, "date") and self.status == "draft":
self.date = datetime.datetime.max.replace(tzinfo=self.timezone)
def _expand_settings(self, key):
klass = 'draft' if self.status == 'draft' else 'article'
klass = "draft" if self.status == "draft" else "article"
return super()._expand_settings(key, klass)
class Static(Content):
mandatory_properties = ('title',)
default_status = 'published'
mandatory_properties = ("title",)
default_status = "published"
default_template = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._output_location_referenced = False
@deprecated_attribute(old='filepath', new='source_path', since=(3, 2, 0))
@deprecated_attribute(old="filepath", new="source_path", since=(3, 2, 0))
def filepath():
return None
@deprecated_attribute(old='src', new='source_path', since=(3, 2, 0))
@deprecated_attribute(old="src", new="source_path", since=(3, 2, 0))
def src():
return None
@deprecated_attribute(old='dst', new='save_as', since=(3, 2, 0))
@deprecated_attribute(old="dst", new="save_as", since=(3, 2, 0))
def dst():
return None
@@ -577,8 +600,7 @@ class Static(Content):
return super().save_as
def attach_to(self, content):
"""Override our output directory with that of the given content object.
"""
"""Override our output directory with that of the given content object."""
# Determine our file's new output path relative to the linking
# document. If it currently lives beneath the linking
@@ -589,8 +611,7 @@ class Static(Content):
tail_path = os.path.relpath(self.source_path, linking_source_dir)
if tail_path.startswith(os.pardir + os.sep):
tail_path = os.path.basename(tail_path)
new_save_as = os.path.join(
os.path.dirname(content.save_as), tail_path)
new_save_as = os.path.join(os.path.dirname(content.save_as), tail_path)
# We do not build our new url by joining tail_path with the linking
# document's url, because we cannot know just by looking at the latter
@@ -609,12 +630,14 @@ class Static(Content):
"%s because %s. Falling back to "
"{filename} link behavior instead.",
content.get_relative_source_path(),
self.get_relative_source_path(), reason,
extra={'limit_msg': "More {attach} warnings silenced."})
self.get_relative_source_path(),
reason,
extra={"limit_msg": "More {attach} warnings silenced."},
)
# We never override an override, because we don't want to interfere
# with user-defined overrides that might be in EXTRA_PATH_METADATA.
if hasattr(self, 'override_save_as') or hasattr(self, 'override_url'):
if hasattr(self, "override_save_as") or hasattr(self, "override_url"):
if new_save_as != self.save_as or new_url != self.url:
_log_reason("its output location was already overridden")
return

File diff suppressed because it is too large

@@ -4,9 +4,7 @@ from collections import defaultdict
from rich.console import Console
from rich.logging import RichHandler
__all__ = [
'init'
]
__all__ = ["init"]
console = Console()
@@ -34,8 +32,8 @@ class LimitFilter(logging.Filter):
return True
# extract group
group = record.__dict__.get('limit_msg', None)
group_args = record.__dict__.get('limit_args', ())
group = record.__dict__.get("limit_msg", None)
group_args = record.__dict__.get("limit_args", ())
# ignore record if it was already raised
message_key = (record.levelno, record.getMessage())
@@ -50,7 +48,7 @@ class LimitFilter(logging.Filter):
if logger_level > logging.DEBUG:
template_key = (record.levelno, record.msg)
message_key = (record.levelno, record.getMessage())
if (template_key in self._ignore or message_key in self._ignore):
if template_key in self._ignore or message_key in self._ignore:
return False
# check if we went over threshold
@@ -90,12 +88,12 @@ class FatalLogger(LimitLogger):
def warning(self, *args, **kwargs):
super().warning(*args, **kwargs)
if FatalLogger.warnings_fatal:
raise RuntimeError('Warning encountered')
raise RuntimeError("Warning encountered")
def error(self, *args, **kwargs):
super().error(*args, **kwargs)
if FatalLogger.errors_fatal:
raise RuntimeError('Error encountered')
raise RuntimeError("Error encountered")
logging.setLoggerClass(FatalLogger)
@@ -103,17 +101,19 @@ logging.setLoggerClass(FatalLogger)
logging.getLogger().__class__ = FatalLogger
def init(level=None, fatal='', handler=RichHandler(console=console), name=None,
logs_dedup_min_level=None):
FatalLogger.warnings_fatal = fatal.startswith('warning')
def init(
level=None,
fatal="",
handler=RichHandler(console=console),
name=None,
logs_dedup_min_level=None,
):
FatalLogger.warnings_fatal = fatal.startswith("warning")
FatalLogger.errors_fatal = bool(fatal)
LOG_FORMAT = "%(message)s"
logging.basicConfig(
level=level,
format=LOG_FORMAT,
datefmt="[%H:%M:%S]",
handlers=[handler]
level=level, format=LOG_FORMAT, datefmt="[%H:%M:%S]", handlers=[handler]
)
logger = logging.getLogger(name)
@ -126,17 +126,18 @@ def init(level=None, fatal='', handler=RichHandler(console=console), name=None,
def log_warnings():
import warnings
logging.captureWarnings(True)
warnings.simplefilter("default", DeprecationWarning)
init(logging.DEBUG, name='py.warnings')
init(logging.DEBUG, name="py.warnings")
if __name__ == '__main__':
if __name__ == "__main__":
init(level=logging.DEBUG, name=__name__)
root_logger = logging.getLogger(__name__)
root_logger.debug('debug')
root_logger.info('info')
root_logger.warning('warning')
root_logger.error('error')
root_logger.critical('critical')
root_logger.debug("debug")
root_logger.info("info")
root_logger.warning("warning")
root_logger.error("error")
root_logger.critical("critical")
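
The fatal flag above threads into FatalLogger, so warnings can abort a build. A minimal usage sketch (the call site and logger name are hypothetical):

import logging
from pelican.log import init

init(level=logging.INFO, fatal="warning")
logger = logging.getLogger("my.plugin")
logger.warning("boom")  # FatalLogger.warning() raises RuntimeError("Warning encountered")
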

View file

@ -6,8 +6,8 @@ from math import ceil
logger = logging.getLogger(__name__)
PaginationRule = namedtuple(
'PaginationRule',
'min_page URL SAVE_AS',
"PaginationRule",
"min_page URL SAVE_AS",
)
@ -19,7 +19,7 @@ class Paginator:
self.settings = settings
if per_page:
self.per_page = per_page
self.orphans = settings['DEFAULT_ORPHANS']
self.orphans = settings["DEFAULT_ORPHANS"]
else:
self.per_page = len(object_list)
self.orphans = 0
@ -32,14 +32,21 @@ class Paginator:
top = bottom + self.per_page
if top + self.orphans >= self.count:
top = self.count
return Page(self.name, self.url, self.object_list[bottom:top], number,
self, self.settings)
return Page(
self.name,
self.url,
self.object_list[bottom:top],
number,
self,
self.settings,
)
def _get_count(self):
"Returns the total number of objects, across all pages."
if self._count is None:
self._count = len(self.object_list)
return self._count
count = property(_get_count)
def _get_num_pages(self):
@ -48,6 +55,7 @@ class Paginator:
hits = max(1, self.count - self.orphans)
self._num_pages = int(ceil(hits / (float(self.per_page) or 1)))
return self._num_pages
num_pages = property(_get_num_pages)
def _get_page_range(self):
@ -56,6 +64,7 @@ class Paginator:
a template for loop.
"""
return list(range(1, self.num_pages + 1))
page_range = property(_get_page_range)
@ -64,7 +73,7 @@ class Page:
self.full_name = name
self.name, self.extension = os.path.splitext(name)
dn, fn = os.path.split(name)
self.base_name = dn if fn in ('index.htm', 'index.html') else self.name
self.base_name = dn if fn in ("index.htm", "index.html") else self.name
self.base_url = url
self.object_list = object_list
self.number = number
@ -72,7 +81,7 @@ class Page:
self.settings = settings
def __repr__(self):
return '<Page {} of {}>'.format(self.number, self.paginator.num_pages)
return "<Page {} of {}>".format(self.number, self.paginator.num_pages)
def has_next(self):
return self.number < self.paginator.num_pages
@ -117,7 +126,7 @@ class Page:
rule = None
# find the last matching pagination rule
for p in self.settings['PAGINATION_PATTERNS']:
for p in self.settings["PAGINATION_PATTERNS"]:
if p.min_page == -1:
if not self.has_next():
rule = p
@ -127,22 +136,22 @@ class Page:
rule = p
if not rule:
return ''
return ""
prop_value = getattr(rule, key)
if not isinstance(prop_value, str):
logger.warning('%s is set to %s', key, prop_value)
logger.warning("%s is set to %s", key, prop_value)
return prop_value
# URL or SAVE_AS is a string, format it with a controlled context
context = {
'save_as': self.full_name,
'url': self.base_url,
'name': self.name,
'base_name': self.base_name,
'extension': self.extension,
'number': self.number,
"save_as": self.full_name,
"url": self.base_url,
"name": self.name,
"base_name": self.base_name,
"extension": self.extension,
"number": self.number,
}
ret = prop_value.format(**context)
@ -155,9 +164,9 @@ class Page:
# changed to lstrip() because that would remove all leading slashes and
# thus make the workaround impossible. See
# test_custom_pagination_pattern() for a verification of this.
if ret.startswith('/'):
if ret.startswith("/"):
ret = ret[1:]
return ret
url = property(functools.partial(_from_settings, key='URL'))
save_as = property(functools.partial(_from_settings, key='SAVE_AS'))
url = property(functools.partial(_from_settings, key="URL"))
save_as = property(functools.partial(_from_settings, key="SAVE_AS"))
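
URL and SAVE_AS above are formatted against that controlled context, which is what PAGINATION_PATTERNS entries rely on. A sketch with an illustrative rule:

from pelican.paginator import PaginationRule

rule = PaginationRule(
    min_page=2,
    URL="{base_name}/page/{number}/",
    SAVE_AS="{base_name}/page/{number}/index.html",
)
rule.URL.format(base_name="blog", number=3)  # -> "blog/page/3/"
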

View file

@ -24,26 +24,26 @@ def get_namespace_plugins(ns_pkg=None):
return {
name: importlib.import_module(name)
for finder, name, ispkg
in iter_namespace(ns_pkg)
for finder, name, ispkg in iter_namespace(ns_pkg)
if ispkg
}
def list_plugins(ns_pkg=None):
from pelican.log import init as init_logging
init_logging(logging.INFO)
ns_plugins = get_namespace_plugins(ns_pkg)
if ns_plugins:
logger.info('Plugins found:\n' + '\n'.join(ns_plugins))
logger.info("Plugins found:\n" + "\n".join(ns_plugins))
else:
logger.info('No plugins are installed')
logger.info("No plugins are installed")
def load_legacy_plugin(plugin, plugin_paths):
if '.' in plugin:
if "." in plugin:
# it is in a package, try to resolve package first
package, _, _ = plugin.rpartition('.')
package, _, _ = plugin.rpartition(".")
load_legacy_plugin(package, plugin_paths)
# Try to find plugin in PLUGIN_PATHS
@ -52,7 +52,7 @@ def load_legacy_plugin(plugin, plugin_paths):
# If failed, try to find it in normal importable locations
spec = importlib.util.find_spec(plugin)
if spec is None:
raise ImportError('Cannot import plugin `{}`'.format(plugin))
raise ImportError("Cannot import plugin `{}`".format(plugin))
else:
# Avoid loading the same plugin twice
if spec.name in sys.modules:
@ -78,30 +78,28 @@ def load_legacy_plugin(plugin, plugin_paths):
def load_plugins(settings):
logger.debug('Finding namespace plugins')
logger.debug("Finding namespace plugins")
namespace_plugins = get_namespace_plugins()
if namespace_plugins:
logger.debug('Namespace plugins found:\n' +
'\n'.join(namespace_plugins))
logger.debug("Namespace plugins found:\n" + "\n".join(namespace_plugins))
plugins = []
if settings.get('PLUGINS') is not None:
for plugin in settings['PLUGINS']:
if settings.get("PLUGINS") is not None:
for plugin in settings["PLUGINS"]:
if isinstance(plugin, str):
logger.debug('Loading plugin `%s`', plugin)
logger.debug("Loading plugin `%s`", plugin)
# try to find in namespace plugins
if plugin in namespace_plugins:
plugin = namespace_plugins[plugin]
elif 'pelican.plugins.{}'.format(plugin) in namespace_plugins:
plugin = namespace_plugins['pelican.plugins.{}'.format(
plugin)]
elif "pelican.plugins.{}".format(plugin) in namespace_plugins:
plugin = namespace_plugins["pelican.plugins.{}".format(plugin)]
# try to import it
else:
try:
plugin = load_legacy_plugin(
plugin,
settings.get('PLUGIN_PATHS', []))
plugin, settings.get("PLUGIN_PATHS", [])
)
except ImportError as e:
logger.error('Cannot load plugin `%s`\n%s', plugin, e)
logger.error("Cannot load plugin `%s`\n%s", plugin, e)
continue
plugins.append(plugin)
else:
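
The resolution order above means a bare name in PLUGINS is tried against the pelican.plugins namespace before the legacy path machinery. For example (the plugin name is hypothetical):

PLUGINS = ["sitemap"]
# load_plugins() checks namespace_plugins["sitemap"], then
# namespace_plugins["pelican.plugins.sitemap"], and only then falls back to
# load_legacy_plugin("sitemap", PLUGIN_PATHS).
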

View file

@ -2,48 +2,48 @@ from blinker import signal
# Run-level signals:
initialized = signal('pelican_initialized')
get_generators = signal('get_generators')
all_generators_finalized = signal('all_generators_finalized')
get_writer = signal('get_writer')
finalized = signal('pelican_finalized')
initialized = signal("pelican_initialized")
get_generators = signal("get_generators")
all_generators_finalized = signal("all_generators_finalized")
get_writer = signal("get_writer")
finalized = signal("pelican_finalized")
# Reader-level signals
readers_init = signal('readers_init')
readers_init = signal("readers_init")
# Generator-level signals
generator_init = signal('generator_init')
generator_init = signal("generator_init")
article_generator_init = signal('article_generator_init')
article_generator_pretaxonomy = signal('article_generator_pretaxonomy')
article_generator_finalized = signal('article_generator_finalized')
article_generator_write_article = signal('article_generator_write_article')
article_writer_finalized = signal('article_writer_finalized')
article_generator_init = signal("article_generator_init")
article_generator_pretaxonomy = signal("article_generator_pretaxonomy")
article_generator_finalized = signal("article_generator_finalized")
article_generator_write_article = signal("article_generator_write_article")
article_writer_finalized = signal("article_writer_finalized")
page_generator_init = signal('page_generator_init')
page_generator_finalized = signal('page_generator_finalized')
page_generator_write_page = signal('page_generator_write_page')
page_writer_finalized = signal('page_writer_finalized')
page_generator_init = signal("page_generator_init")
page_generator_finalized = signal("page_generator_finalized")
page_generator_write_page = signal("page_generator_write_page")
page_writer_finalized = signal("page_writer_finalized")
static_generator_init = signal('static_generator_init')
static_generator_finalized = signal('static_generator_finalized')
static_generator_init = signal("static_generator_init")
static_generator_finalized = signal("static_generator_finalized")
# Page-level signals
article_generator_preread = signal('article_generator_preread')
article_generator_context = signal('article_generator_context')
article_generator_preread = signal("article_generator_preread")
article_generator_context = signal("article_generator_context")
page_generator_preread = signal('page_generator_preread')
page_generator_context = signal('page_generator_context')
page_generator_preread = signal("page_generator_preread")
page_generator_context = signal("page_generator_context")
static_generator_preread = signal('static_generator_preread')
static_generator_context = signal('static_generator_context')
static_generator_preread = signal("static_generator_preread")
static_generator_context = signal("static_generator_context")
content_object_init = signal('content_object_init')
content_object_init = signal("content_object_init")
# Writers signals
content_written = signal('content_written')
feed_generated = signal('feed_generated')
feed_written = signal('feed_written')
content_written = signal("content_written")
feed_generated = signal("feed_generated")
feed_written = signal("feed_written")
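
Each of these is an ordinary blinker signal, so a plugin subscribes with connect(). A minimal sketch; the receiver and the register() hook illustrate the plugin convention and are not code from this diff:

from pelican import signals

def on_initialized(pelican_object):
    # runs once settings are loaded; the sender carries .settings
    print(pelican_object.settings.get("SITENAME"))

def register():
    signals.initialized.connect(on_initialized)
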

View file

@ -31,33 +31,29 @@ except ImportError:
_DISCARD = object()
DUPLICATES_DEFINITIONS_ALLOWED = {
'tags': False,
'date': False,
'modified': False,
'status': False,
'category': False,
'author': False,
'save_as': False,
'url': False,
'authors': False,
'slug': False
"tags": False,
"date": False,
"modified": False,
"status": False,
"category": False,
"author": False,
"save_as": False,
"url": False,
"authors": False,
"slug": False,
}
METADATA_PROCESSORS = {
'tags': lambda x, y: ([
Tag(tag, y)
for tag in ensure_metadata_list(x)
] or _DISCARD),
'date': lambda x, y: get_date(x.replace('_', ' ')),
'modified': lambda x, y: get_date(x),
'status': lambda x, y: x.strip() or _DISCARD,
'category': lambda x, y: _process_if_nonempty(Category, x, y),
'author': lambda x, y: _process_if_nonempty(Author, x, y),
'authors': lambda x, y: ([
Author(author, y)
for author in ensure_metadata_list(x)
] or _DISCARD),
'slug': lambda x, y: x.strip() or _DISCARD,
"tags": lambda x, y: ([Tag(tag, y) for tag in ensure_metadata_list(x)] or _DISCARD),
"date": lambda x, y: get_date(x.replace("_", " ")),
"modified": lambda x, y: get_date(x),
"status": lambda x, y: x.strip() or _DISCARD,
"category": lambda x, y: _process_if_nonempty(Category, x, y),
"author": lambda x, y: _process_if_nonempty(Author, x, y),
"authors": lambda x, y: (
[Author(author, y) for author in ensure_metadata_list(x)] or _DISCARD
),
"slug": lambda x, y: x.strip() or _DISCARD,
}
logger = logging.getLogger(__name__)
@ -65,25 +61,23 @@ logger = logging.getLogger(__name__)
def ensure_metadata_list(text):
"""Canonicalize the format of a list of authors or tags. This works
the same way as Docutils' "authors" field: if it's already a list,
those boundaries are preserved; otherwise, it must be a string;
if the string contains semicolons, it is split on semicolons;
otherwise, it is split on commas. This allows you to write
author lists in either "Jane Doe, John Doe" or "Doe, Jane; Doe, John"
format.
the same way as Docutils' "authors" field: if it's already a list,
those boundaries are preserved; otherwise, it must be a string;
if the string contains semicolons, it is split on semicolons;
otherwise, it is split on commas. This allows you to write
author lists in either "Jane Doe, John Doe" or "Doe, Jane; Doe, John"
format.
Regardless, all list items undergo .strip() before returning, and
empty items are discarded.
Regardless, all list items undergo .strip() before returning, and
empty items are discarded.
"""
if isinstance(text, str):
if ';' in text:
text = text.split(';')
if ";" in text:
text = text.split(";")
else:
text = text.split(',')
text = text.split(",")
return list(OrderedDict.fromkeys(
[v for v in (w.strip() for w in text) if v]
))
return list(OrderedDict.fromkeys([v for v in (w.strip() for w in text) if v]))
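
Concretely, the input shapes described in the docstring all normalize the same way (values illustrative):

from pelican.readers import ensure_metadata_list

ensure_metadata_list("Jane Doe, John Doe")    # -> ["Jane Doe", "John Doe"]
ensure_metadata_list("Doe, Jane; Doe, John")  # -> ["Doe, Jane", "Doe, John"]
ensure_metadata_list(["Jane Doe ", ""])       # -> ["Jane Doe"] (stripped, empties dropped)
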
def _process_if_nonempty(processor, name, settings):
@ -112,8 +106,9 @@ class BaseReader:
Markdown).
"""
enabled = True
file_extensions = ['static']
file_extensions = ["static"]
extensions = None
def __init__(self, settings):
@ -132,13 +127,12 @@ class BaseReader:
class _FieldBodyTranslator(HTMLTranslator):
def __init__(self, document):
super().__init__(document)
self.compact_p = None
def astext(self):
return ''.join(self.body)
return "".join(self.body)
def visit_field_body(self, node):
pass
@ -154,27 +148,25 @@ def render_node_to_html(document, node, field_body_translator_class):
class PelicanHTMLWriter(Writer):
def __init__(self):
super().__init__()
self.translator_class = PelicanHTMLTranslator
class PelicanHTMLTranslator(HTMLTranslator):
def visit_abbreviation(self, node):
attrs = {}
if node.hasattr('explanation'):
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', '', **attrs))
if node.hasattr("explanation"):
attrs["title"] = node["explanation"]
self.body.append(self.starttag(node, "abbr", "", **attrs))
def depart_abbreviation(self, node):
self.body.append('</abbr>')
self.body.append("</abbr>")
def visit_image(self, node):
# set an empty alt if alt is not specified
# avoids that alt is taken from src
node['alt'] = node.get('alt', '')
node["alt"] = node.get("alt", "")
return HTMLTranslator.visit_image(self, node)
@ -194,7 +186,7 @@ class RstReader(BaseReader):
"""
enabled = bool(docutils)
file_extensions = ['rst']
file_extensions = ["rst"]
writer_class = PelicanHTMLWriter
field_body_translator_class = _FieldBodyTranslator
@ -202,25 +194,28 @@ class RstReader(BaseReader):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
lang_code = self.settings.get('DEFAULT_LANG', 'en')
lang_code = self.settings.get("DEFAULT_LANG", "en")
if get_docutils_lang(lang_code):
self._language_code = lang_code
else:
logger.warning("Docutils has no localization for '%s'."
" Using 'en' instead.", lang_code)
self._language_code = 'en'
logger.warning(
"Docutils has no localization for '%s'." " Using 'en' instead.",
lang_code,
)
self._language_code = "en"
def _parse_metadata(self, document, source_path):
"""Return the dict containing document metadata"""
formatted_fields = self.settings['FORMATTED_FIELDS']
formatted_fields = self.settings["FORMATTED_FIELDS"]
output = {}
if document.first_child_matching_class(docutils.nodes.title) is None:
logger.warning(
'Document title missing in file %s: '
'Ensure exactly one top level section',
source_path)
"Document title missing in file %s: "
"Ensure exactly one top level section",
source_path,
)
try:
# docutils 0.18.1+
@ -231,16 +226,16 @@ class RstReader(BaseReader):
for docinfo in nodes:
for element in docinfo.children:
if element.tagname == 'field': # custom fields (e.g. summary)
if element.tagname == "field": # custom fields (e.g. summary)
name_elem, body_elem = element.children
name = name_elem.astext()
if name.lower() in formatted_fields:
value = render_node_to_html(
document, body_elem,
self.field_body_translator_class)
document, body_elem, self.field_body_translator_class
)
else:
value = body_elem.astext()
elif element.tagname == 'authors': # author list
elif element.tagname == "authors": # author list
name = element.tagname
value = [element.astext() for element in element.children]
else: # standard fields (e.g. address)
@ -252,22 +247,24 @@ class RstReader(BaseReader):
return output
def _get_publisher(self, source_path):
extra_params = {'initial_header_level': '2',
'syntax_highlight': 'short',
'input_encoding': 'utf-8',
'language_code': self._language_code,
'halt_level': 2,
'traceback': True,
'warning_stream': StringIO(),
'embed_stylesheet': False}
user_params = self.settings.get('DOCUTILS_SETTINGS')
extra_params = {
"initial_header_level": "2",
"syntax_highlight": "short",
"input_encoding": "utf-8",
"language_code": self._language_code,
"halt_level": 2,
"traceback": True,
"warning_stream": StringIO(),
"embed_stylesheet": False,
}
user_params = self.settings.get("DOCUTILS_SETTINGS")
if user_params:
extra_params.update(user_params)
pub = docutils.core.Publisher(
writer=self.writer_class(),
destination_class=docutils.io.StringOutput)
pub.set_components('standalone', 'restructuredtext', 'html')
writer=self.writer_class(), destination_class=docutils.io.StringOutput
)
pub.set_components("standalone", "restructuredtext", "html")
pub.process_programmatic_settings(None, extra_params, None)
pub.set_source(source_path=source_path)
pub.publish()
@ -277,10 +274,10 @@ class RstReader(BaseReader):
"""Parses restructured text"""
pub = self._get_publisher(source_path)
parts = pub.writer.parts
content = parts.get('body')
content = parts.get("body")
metadata = self._parse_metadata(pub.document, source_path)
metadata.setdefault('title', parts.get('title'))
metadata.setdefault("title", parts.get("title"))
return content, metadata
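
Since user_params simply update extra_params, any of the defaults above can be overridden per site through DOCUTILS_SETTINGS. A sketch of such an override in a pelicanconf (the chosen values are illustrative):

# pelicanconf.py
DOCUTILS_SETTINGS = {
    "initial_header_level": "3",  # replaces the "2" default set above
    "syntax_highlight": "long",   # replaces "short"
}
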
@ -289,26 +286,26 @@ class MarkdownReader(BaseReader):
"""Reader for Markdown files"""
enabled = bool(Markdown)
file_extensions = ['md', 'markdown', 'mkd', 'mdown']
file_extensions = ["md", "markdown", "mkd", "mdown"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
settings = self.settings['MARKDOWN']
settings.setdefault('extension_configs', {})
settings.setdefault('extensions', [])
for extension in settings['extension_configs'].keys():
if extension not in settings['extensions']:
settings['extensions'].append(extension)
if 'markdown.extensions.meta' not in settings['extensions']:
settings['extensions'].append('markdown.extensions.meta')
settings = self.settings["MARKDOWN"]
settings.setdefault("extension_configs", {})
settings.setdefault("extensions", [])
for extension in settings["extension_configs"].keys():
if extension not in settings["extensions"]:
settings["extensions"].append(extension)
if "markdown.extensions.meta" not in settings["extensions"]:
settings["extensions"].append("markdown.extensions.meta")
self._source_path = None
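
In configuration terms, the normalization above lets extension_configs keys imply the extensions list and guarantees the meta extension is present. For example (config values illustrative):

MARKDOWN = {
    "extension_configs": {
        "markdown.extensions.codehilite": {"css_class": "highlight"},
        "markdown.extensions.extra": {},
    },
    # "extensions" may be omitted: each key above is appended to it, and
    # "markdown.extensions.meta" is always added so metadata parsing works.
}
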
def _parse_metadata(self, meta):
"""Return the dict containing document metadata"""
formatted_fields = self.settings['FORMATTED_FIELDS']
formatted_fields = self.settings["FORMATTED_FIELDS"]
# prevent metadata extraction in fields
self._md.preprocessors.deregister('meta')
self._md.preprocessors.deregister("meta")
output = {}
for name, value in meta.items():
@ -323,9 +320,10 @@ class MarkdownReader(BaseReader):
elif not DUPLICATES_DEFINITIONS_ALLOWED.get(name, True):
if len(value) > 1:
logger.warning(
'Duplicate definition of `%s` '
'for %s. Using first one.',
name, self._source_path)
"Duplicate definition of `%s` " "for %s. Using first one.",
name,
self._source_path,
)
output[name] = self.process_metadata(name, value[0])
elif len(value) > 1:
# handle list metadata as list of string
@ -339,11 +337,11 @@ class MarkdownReader(BaseReader):
"""Parse content and metadata of markdown files"""
self._source_path = source_path
self._md = Markdown(**self.settings['MARKDOWN'])
self._md = Markdown(**self.settings["MARKDOWN"])
with pelican_open(source_path) as text:
content = self._md.convert(text)
if hasattr(self._md, 'Meta'):
if hasattr(self._md, "Meta"):
metadata = self._parse_metadata(self._md.Meta)
else:
metadata = {}
@ -353,17 +351,17 @@ class MarkdownReader(BaseReader):
class HTMLReader(BaseReader):
"""Parses HTML files as input, looking for meta, title, and body tags"""
file_extensions = ['htm', 'html']
file_extensions = ["htm", "html"]
enabled = True
class _HTMLParser(HTMLParser):
def __init__(self, settings, filename):
super().__init__(convert_charrefs=False)
self.body = ''
self.body = ""
self.metadata = {}
self.settings = settings
self._data_buffer = ''
self._data_buffer = ""
self._filename = filename
@ -374,59 +372,59 @@ class HTMLReader(BaseReader):
self._in_tags = False
def handle_starttag(self, tag, attrs):
if tag == 'head' and self._in_top_level:
if tag == "head" and self._in_top_level:
self._in_top_level = False
self._in_head = True
elif tag == 'title' and self._in_head:
elif tag == "title" and self._in_head:
self._in_title = True
self._data_buffer = ''
elif tag == 'body' and self._in_top_level:
self._data_buffer = ""
elif tag == "body" and self._in_top_level:
self._in_top_level = False
self._in_body = True
self._data_buffer = ''
elif tag == 'meta' and self._in_head:
self._data_buffer = ""
elif tag == "meta" and self._in_head:
self._handle_meta_tag(attrs)
elif self._in_body:
self._data_buffer += self.build_tag(tag, attrs, False)
def handle_endtag(self, tag):
if tag == 'head':
if tag == "head":
if self._in_head:
self._in_head = False
self._in_top_level = True
elif self._in_head and tag == 'title':
elif self._in_head and tag == "title":
self._in_title = False
self.metadata['title'] = self._data_buffer
elif tag == 'body':
self.metadata["title"] = self._data_buffer
elif tag == "body":
self.body = self._data_buffer
self._in_body = False
self._in_top_level = True
elif self._in_body:
self._data_buffer += '</{}>'.format(escape(tag))
self._data_buffer += "</{}>".format(escape(tag))
def handle_startendtag(self, tag, attrs):
if tag == 'meta' and self._in_head:
if tag == "meta" and self._in_head:
self._handle_meta_tag(attrs)
if self._in_body:
self._data_buffer += self.build_tag(tag, attrs, True)
def handle_comment(self, data):
self._data_buffer += '<!--{}-->'.format(data)
self._data_buffer += "<!--{}-->".format(data)
def handle_data(self, data):
self._data_buffer += data
def handle_entityref(self, data):
self._data_buffer += '&{};'.format(data)
self._data_buffer += "&{};".format(data)
def handle_charref(self, data):
self._data_buffer += '&#{};'.format(data)
self._data_buffer += "&#{};".format(data)
def build_tag(self, tag, attrs, close_tag):
result = '<{}'.format(escape(tag))
result = "<{}".format(escape(tag))
for k, v in attrs:
result += ' ' + escape(k)
result += " " + escape(k)
if v is not None:
# If the attribute value contains a double quote, surround
# with single quotes, otherwise use double quotes.
@ -435,33 +433,39 @@ class HTMLReader(BaseReader):
else:
result += '="{}"'.format(escape(v, quote=False))
if close_tag:
return result + ' />'
return result + '>'
return result + " />"
return result + ">"
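
The quoting rule described in the comment above can be isolated as a small helper; a standalone sketch, not code from this diff:

from html import escape

def format_attr(key, value):
    if value is None:  # bare attribute, rendered as just its name
        return " " + escape(key)
    if '"' in value:   # value contains a double quote -> single-quote it
        return " {}='{}'".format(escape(key), escape(value, quote=False))
    return ' {}="{}"'.format(escape(key), escape(value, quote=False))
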
def _handle_meta_tag(self, attrs):
name = self._attr_value(attrs, 'name')
name = self._attr_value(attrs, "name")
if name is None:
attr_list = ['{}="{}"'.format(k, v) for k, v in attrs]
attr_serialized = ', '.join(attr_list)
logger.warning("Meta tag in file %s does not have a 'name' "
"attribute, skipping. Attributes: %s",
self._filename, attr_serialized)
attr_serialized = ", ".join(attr_list)
logger.warning(
"Meta tag in file %s does not have a 'name' "
"attribute, skipping. Attributes: %s",
self._filename,
attr_serialized,
)
return
name = name.lower()
contents = self._attr_value(attrs, 'content', '')
contents = self._attr_value(attrs, "content", "")
if not contents:
contents = self._attr_value(attrs, 'contents', '')
contents = self._attr_value(attrs, "contents", "")
if contents:
logger.warning(
"Meta tag attribute 'contents' used in file %s, should"
" be changed to 'content'",
self._filename,
extra={'limit_msg': "Other files have meta tag "
"attribute 'contents' that should "
"be changed to 'content'"})
extra={
"limit_msg": "Other files have meta tag "
"attribute 'contents' that should "
"be changed to 'content'"
},
)
if name == 'keywords':
name = 'tags'
if name == "keywords":
name = "tags"
if name in self.metadata:
# if this metadata already exists (i.e. a previous tag with the
@ -501,22 +505,23 @@ class Readers(FileStampDataCacher):
"""
def __init__(self, settings=None, cache_name=''):
def __init__(self, settings=None, cache_name=""):
self.settings = settings or {}
self.readers = {}
self.reader_classes = {}
for cls in [BaseReader] + BaseReader.__subclasses__():
if not cls.enabled:
logger.debug('Missing dependencies for %s',
', '.join(cls.file_extensions))
logger.debug(
"Missing dependencies for %s", ", ".join(cls.file_extensions)
)
continue
for ext in cls.file_extensions:
self.reader_classes[ext] = cls
if self.settings['READERS']:
self.reader_classes.update(self.settings['READERS'])
if self.settings["READERS"]:
self.reader_classes.update(self.settings["READERS"])
signals.readers_init.send(self)
@ -527,53 +532,67 @@ class Readers(FileStampDataCacher):
self.readers[fmt] = reader_class(self.settings)
# set up caching
cache_this_level = (cache_name != '' and
self.settings['CONTENT_CACHING_LAYER'] == 'reader')
caching_policy = cache_this_level and self.settings['CACHE_CONTENT']
load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE']
cache_this_level = (
cache_name != "" and self.settings["CONTENT_CACHING_LAYER"] == "reader"
)
caching_policy = cache_this_level and self.settings["CACHE_CONTENT"]
load_policy = cache_this_level and self.settings["LOAD_CONTENT_CACHE"]
super().__init__(settings, cache_name, caching_policy, load_policy)
@property
def extensions(self):
return self.readers.keys()
def read_file(self, base_path, path, content_class=Page, fmt=None,
context=None, preread_signal=None, preread_sender=None,
context_signal=None, context_sender=None):
def read_file(
self,
base_path,
path,
content_class=Page,
fmt=None,
context=None,
preread_signal=None,
preread_sender=None,
context_signal=None,
context_sender=None,
):
"""Return a content object parsed with the given format."""
path = os.path.abspath(os.path.join(base_path, path))
source_path = posixize_path(os.path.relpath(path, base_path))
logger.debug(
'Read file %s -> %s',
source_path, content_class.__name__)
logger.debug("Read file %s -> %s", source_path, content_class.__name__)
if not fmt:
_, ext = os.path.splitext(os.path.basename(path))
fmt = ext[1:]
if fmt not in self.readers:
raise TypeError(
'Pelican does not know how to parse %s', path)
raise TypeError("Pelican does not know how to parse %s", path)
if preread_signal:
logger.debug(
'Signal %s.send(%s)',
preread_signal.name, preread_sender)
logger.debug("Signal %s.send(%s)", preread_signal.name, preread_sender)
preread_signal.send(preread_sender)
reader = self.readers[fmt]
metadata = _filter_discardable_metadata(default_metadata(
settings=self.settings, process=reader.process_metadata))
metadata.update(path_metadata(
full_path=path, source_path=source_path,
settings=self.settings))
metadata.update(_filter_discardable_metadata(parse_path_metadata(
source_path=source_path, settings=self.settings,
process=reader.process_metadata)))
metadata = _filter_discardable_metadata(
default_metadata(settings=self.settings, process=reader.process_metadata)
)
metadata.update(
path_metadata(
full_path=path, source_path=source_path, settings=self.settings
)
)
metadata.update(
_filter_discardable_metadata(
parse_path_metadata(
source_path=source_path,
settings=self.settings,
process=reader.process_metadata,
)
)
)
reader_name = reader.__class__.__name__
metadata['reader'] = reader_name.replace('Reader', '').lower()
metadata["reader"] = reader_name.replace("Reader", "").lower()
content, reader_metadata = self.get_cached_data(path, (None, None))
if content is None:
@ -587,14 +606,14 @@ class Readers(FileStampDataCacher):
find_empty_alt(content, path)
# eventually filter the content with typogrify if asked so
if self.settings['TYPOGRIFY']:
if self.settings["TYPOGRIFY"]:
from typogrify.filters import typogrify
import smartypants
typogrify_dashes = self.settings['TYPOGRIFY_DASHES']
if typogrify_dashes == 'oldschool':
typogrify_dashes = self.settings["TYPOGRIFY_DASHES"]
if typogrify_dashes == "oldschool":
smartypants.Attr.default = smartypants.Attr.set2
elif typogrify_dashes == 'oldschool_inverted':
elif typogrify_dashes == "oldschool_inverted":
smartypants.Attr.default = smartypants.Attr.set3
else:
smartypants.Attr.default = smartypants.Attr.set1
@ -608,31 +627,32 @@ class Readers(FileStampDataCacher):
def typogrify_wrapper(text):
"""Ensures ignore_tags feature is backward compatible"""
try:
return typogrify(
text,
self.settings['TYPOGRIFY_IGNORE_TAGS'])
return typogrify(text, self.settings["TYPOGRIFY_IGNORE_TAGS"])
except TypeError:
return typogrify(text)
if content:
content = typogrify_wrapper(content)
if 'title' in metadata:
metadata['title'] = typogrify_wrapper(metadata['title'])
if "title" in metadata:
metadata["title"] = typogrify_wrapper(metadata["title"])
if 'summary' in metadata:
metadata['summary'] = typogrify_wrapper(metadata['summary'])
if "summary" in metadata:
metadata["summary"] = typogrify_wrapper(metadata["summary"])
if context_signal:
logger.debug(
'Signal %s.send(%s, <metadata>)',
context_signal.name,
context_sender)
"Signal %s.send(%s, <metadata>)", context_signal.name, context_sender
)
context_signal.send(context_sender, metadata=metadata)
return content_class(content=content, metadata=metadata,
settings=self.settings, source_path=path,
context=context)
return content_class(
content=content,
metadata=metadata,
settings=self.settings,
source_path=path,
context=context,
)
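
A direct call mirrors what the generators do internally; a sketch with hypothetical paths, reusing the DEFAULT_CONFIG imported elsewhere in this changeset:

from pelican.readers import Readers
from pelican.settings import DEFAULT_CONFIG

readers = Readers(settings=DEFAULT_CONFIG)
page = readers.read_file(base_path="content", path="pages/about.md")
print(page.title, page.metadata["reader"])  # e.g. "About markdown"
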
def find_empty_alt(content, path):
@ -642,7 +662,8 @@ def find_empty_alt(content, path):
as they are really likely to be accessibility flaws.
"""
imgs = re.compile(r"""
imgs = re.compile(
r"""
(?:
# src before alt
<img
@ -658,53 +679,57 @@ def find_empty_alt(content, path):
[^\>]*
src=(['"])(.*?)\5
)
""", re.X)
""",
re.X,
)
for match in re.findall(imgs, content):
logger.warning(
'Empty alt attribute for image %s in %s',
os.path.basename(match[1] + match[5]), path,
extra={'limit_msg': 'Other images have empty alt attributes'})
"Empty alt attribute for image %s in %s",
os.path.basename(match[1] + match[5]),
path,
extra={"limit_msg": "Other images have empty alt attributes"},
)
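
The pattern matches either attribute order; for instance (the content string is illustrative):

from pelican.readers import find_empty_alt

find_empty_alt('<img alt="" src="foo.png">', "posts/draft.md")
# logs: Empty alt attribute for image foo.png in posts/draft.md
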
def default_metadata(settings=None, process=None):
metadata = {}
if settings:
for name, value in dict(settings.get('DEFAULT_METADATA', {})).items():
for name, value in dict(settings.get("DEFAULT_METADATA", {})).items():
if process:
value = process(name, value)
metadata[name] = value
if 'DEFAULT_CATEGORY' in settings:
value = settings['DEFAULT_CATEGORY']
if "DEFAULT_CATEGORY" in settings:
value = settings["DEFAULT_CATEGORY"]
if process:
value = process('category', value)
metadata['category'] = value
if settings.get('DEFAULT_DATE', None) and \
settings['DEFAULT_DATE'] != 'fs':
if isinstance(settings['DEFAULT_DATE'], str):
metadata['date'] = get_date(settings['DEFAULT_DATE'])
value = process("category", value)
metadata["category"] = value
if settings.get("DEFAULT_DATE", None) and settings["DEFAULT_DATE"] != "fs":
if isinstance(settings["DEFAULT_DATE"], str):
metadata["date"] = get_date(settings["DEFAULT_DATE"])
else:
metadata['date'] = datetime.datetime(*settings['DEFAULT_DATE'])
metadata["date"] = datetime.datetime(*settings["DEFAULT_DATE"])
return metadata
def path_metadata(full_path, source_path, settings=None):
metadata = {}
if settings:
if settings.get('DEFAULT_DATE', None) == 'fs':
metadata['date'] = datetime.datetime.fromtimestamp(
os.stat(full_path).st_mtime)
metadata['modified'] = metadata['date']
if settings.get("DEFAULT_DATE", None) == "fs":
metadata["date"] = datetime.datetime.fromtimestamp(
os.stat(full_path).st_mtime
)
metadata["modified"] = metadata["date"]
# Apply EXTRA_PATH_METADATA for the source path and the paths of any
# parent directories. Sorting EPM first ensures that the most specific
# path wins conflicts.
epm = settings.get('EXTRA_PATH_METADATA', {})
epm = settings.get("EXTRA_PATH_METADATA", {})
for path, meta in sorted(epm.items()):
# Enforce a trailing slash when checking for parent directories.
# This prevents false positives when one file or directory's name
# is a prefix of another's.
dirpath = posixize_path(os.path.join(path, ''))
dirpath = posixize_path(os.path.join(path, ""))
if source_path == path or source_path.startswith(dirpath):
metadata.update(meta)
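
The trailing-slash guard above is why a directory rule applies to its contents but not to lookalike prefixes. For example (the second mapping is hypothetical):

EXTRA_PATH_METADATA = {
    "extra/robots.txt": {"path": "robots.txt"},  # exact file
    "static/fonts": {"status": "hidden"},        # matches static/fonts/*, not static/fonts-old/*
}
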
@ -736,11 +761,10 @@ def parse_path_metadata(source_path, settings=None, process=None):
subdir = os.path.basename(dirname)
if settings:
checks = []
for key, data in [('FILENAME_METADATA', base),
('PATH_METADATA', source_path)]:
for key, data in [("FILENAME_METADATA", base), ("PATH_METADATA", source_path)]:
checks.append((settings.get(key, None), data))
if settings.get('USE_FOLDER_AS_CATEGORY', None):
checks.append(('(?P<category>.*)', subdir))
if settings.get("USE_FOLDER_AS_CATEGORY", None):
checks.append(("(?P<category>.*)", subdir))
for regexp, data in checks:
if regexp and data:
match = re.match(regexp, data)
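
FILENAME_METADATA and PATH_METADATA are exactly the regexps fed into this loop; named groups become metadata entries as processing continues past this hunk. An illustrative pattern:

FILENAME_METADATA = r"(?P<date>\d{4}-\d{2}-\d{2})_(?P<slug>.*)"
# "2023-10-30_enforce-code-style.md" -> date: 2023-10-30, slug: enforce-code-style
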

View file

@ -11,26 +11,26 @@ import pelican.settings as pys
class Pygments(Directive):
""" Source code syntax highlighting.
"""
"""Source code syntax highlighting."""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
'anchorlinenos': directives.flag,
'classprefix': directives.unchanged,
'hl_lines': directives.unchanged,
'lineanchors': directives.unchanged,
'linenos': directives.unchanged,
'linenospecial': directives.nonnegative_int,
'linenostart': directives.nonnegative_int,
'linenostep': directives.nonnegative_int,
'lineseparator': directives.unchanged,
'linespans': directives.unchanged,
'nobackground': directives.flag,
'nowrap': directives.flag,
'tagsfile': directives.unchanged,
'tagurlformat': directives.unchanged,
"anchorlinenos": directives.flag,
"classprefix": directives.unchanged,
"hl_lines": directives.unchanged,
"lineanchors": directives.unchanged,
"linenos": directives.unchanged,
"linenospecial": directives.nonnegative_int,
"linenostart": directives.nonnegative_int,
"linenostep": directives.nonnegative_int,
"lineseparator": directives.unchanged,
"linespans": directives.unchanged,
"nobackground": directives.flag,
"nowrap": directives.flag,
"tagsfile": directives.unchanged,
"tagurlformat": directives.unchanged,
}
has_content = True
@ -49,28 +49,30 @@ class Pygments(Directive):
if k not in self.options:
self.options[k] = v
if ('linenos' in self.options and
self.options['linenos'] not in ('table', 'inline')):
if self.options['linenos'] == 'none':
self.options.pop('linenos')
if "linenos" in self.options and self.options["linenos"] not in (
"table",
"inline",
):
if self.options["linenos"] == "none":
self.options.pop("linenos")
else:
self.options['linenos'] = 'table'
self.options["linenos"] = "table"
for flag in ('nowrap', 'nobackground', 'anchorlinenos'):
for flag in ("nowrap", "nobackground", "anchorlinenos"):
if flag in self.options:
self.options[flag] = True
# noclasses should already default to False, but just in case...
formatter = HtmlFormatter(noclasses=False, **self.options)
parsed = highlight('\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
parsed = highlight("\n".join(self.content), lexer, formatter)
return [nodes.raw("", parsed, format="html")]
directives.register_directive('code-block', Pygments)
directives.register_directive('sourcecode', Pygments)
directives.register_directive("code-block", Pygments)
directives.register_directive("sourcecode", Pygments)
_abbr_re = re.compile(r'\((.*)\)$', re.DOTALL)
_abbr_re = re.compile(r"\((.*)\)$", re.DOTALL)
class abbreviation(nodes.Inline, nodes.TextElement):
@ -82,9 +84,9 @@ def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
m = _abbr_re.search(text)
if m is None:
return [abbreviation(text, text)], []
abbr = text[:m.start()].strip()
abbr = text[: m.start()].strip()
expl = m.group(1)
return [abbreviation(abbr, abbr, explanation=expl)], []
roles.register_local_role('abbr', abbr_role)
roles.register_local_role("abbr", abbr_role)
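
_abbr_re splits the role text into abbreviation and explanation; a quick standalone check (strings illustrative):

import re

_abbr_re = re.compile(r"\((.*)\)$", re.DOTALL)
text = "LIFO (last-in, first-out)"
m = _abbr_re.search(text)
text[: m.start()].strip(), m.group(1)  # -> ("LIFO", "last-in, first-out")
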

View file

@ -14,38 +14,47 @@ except ImportError:
from pelican.log import console # noqa: F401
from pelican.log import init as init_logging
logger = logging.getLogger(__name__)
def parse_arguments():
parser = argparse.ArgumentParser(
description='Pelican Development Server',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
description="Pelican Development Server",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"port", default=8000, type=int, nargs="?", help="Port to Listen On"
)
parser.add_argument("server", default="", nargs="?", help="Interface to Listen On")
parser.add_argument("--ssl", action="store_true", help="Activate SSL listener")
parser.add_argument(
"--cert",
default="./cert.pem",
nargs="?",
help="Path to certificate file. " + "Relative to current directory",
)
parser.add_argument(
"--key",
default="./key.pem",
nargs="?",
help="Path to certificate key file. " + "Relative to current directory",
)
parser.add_argument(
"--path",
default=".",
help="Path to pelican source directory to serve. "
+ "Relative to current directory",
)
parser.add_argument("port", default=8000, type=int, nargs="?",
help="Port to Listen On")
parser.add_argument("server", default="", nargs="?",
help="Interface to Listen On")
parser.add_argument('--ssl', action="store_true",
help='Activate SSL listener')
parser.add_argument('--cert', default="./cert.pem", nargs="?",
help='Path to certificate file. ' +
'Relative to current directory')
parser.add_argument('--key', default="./key.pem", nargs="?",
help='Path to certificate key file. ' +
'Relative to current directory')
parser.add_argument('--path', default=".",
help='Path to pelican source directory to serve. ' +
'Relative to current directory')
return parser.parse_args()
class ComplexHTTPRequestHandler(server.SimpleHTTPRequestHandler):
SUFFIXES = ['.html', '/index.html', '/', '']
SUFFIXES = [".html", "/index.html", "/", ""]
extensions_map = {
**server.SimpleHTTPRequestHandler.extensions_map,
** {
**{
# web fonts
".oft": "font/oft",
".sfnt": "font/sfnt",
@ -57,13 +66,13 @@ class ComplexHTTPRequestHandler(server.SimpleHTTPRequestHandler):
def translate_path(self, path):
# abandon query parameters
path = path.split('?', 1)[0]
path = path.split('#', 1)[0]
path = path.split("?", 1)[0]
path = path.split("#", 1)[0]
# Don't forget explicit trailing slash when normalizing. Issue17324
trailing_slash = path.rstrip().endswith('/')
trailing_slash = path.rstrip().endswith("/")
path = urllib.parse.unquote(path)
path = posixpath.normpath(path)
words = path.split('/')
words = path.split("/")
words = filter(None, words)
path = self.base_path
for word in words:
@ -72,12 +81,12 @@ class ComplexHTTPRequestHandler(server.SimpleHTTPRequestHandler):
continue
path = os.path.join(path, word)
if trailing_slash:
path += '/'
path += "/"
return path
def do_GET(self):
# cut off a query string
original_path = self.path.split('?', 1)[0]
original_path = self.path.split("?", 1)[0]
# try to find file
self.path = self.get_path_that_exists(original_path)
@ -88,12 +97,12 @@ class ComplexHTTPRequestHandler(server.SimpleHTTPRequestHandler):
def get_path_that_exists(self, original_path):
# Try to strip trailing slash
trailing_slash = original_path.endswith('/')
original_path = original_path.rstrip('/')
trailing_slash = original_path.endswith("/")
original_path = original_path.rstrip("/")
# Try to detect file by applying various suffixes
tries = []
for suffix in self.SUFFIXES:
if not trailing_slash and suffix == '/':
if not trailing_slash and suffix == "/":
# if original request does not have trailing slash, skip the '/' suffix
# so that base class can redirect if needed
continue
@ -101,18 +110,17 @@ class ComplexHTTPRequestHandler(server.SimpleHTTPRequestHandler):
if os.path.exists(self.translate_path(path)):
return path
tries.append(path)
logger.warning("Unable to find `%s` or variations:\n%s",
original_path,
'\n'.join(tries))
logger.warning(
"Unable to find `%s` or variations:\n%s", original_path, "\n".join(tries)
)
return None
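
With SUFFIXES as defined above, a request for /about (no trailing slash) is probed in order; a standalone sketch of the loop's effect:

SUFFIXES = [".html", "/index.html", "/", ""]
original_path = "/about"
tries = [original_path + s for s in SUFFIXES if s != "/"]  # "/" skipped without a trailing slash
# -> ["/about.html", "/about/index.html", "/about"]
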
def guess_type(self, path):
"""Guess at the mime type for the specified file.
"""
"""Guess at the mime type for the specified file."""
mimetype = server.SimpleHTTPRequestHandler.guess_type(self, path)
# If the default guess is too generic, try the python-magic library
if mimetype == 'application/octet-stream' and magic_from_file:
if mimetype == "application/octet-stream" and magic_from_file:
mimetype = magic_from_file(path, mime=True)
return mimetype
@ -127,31 +135,33 @@ class RootedHTTPServer(server.HTTPServer):
self.RequestHandlerClass.base_path = base_path
if __name__ == '__main__':
if __name__ == "__main__":
init_logging(level=logging.INFO)
logger.warning("'python -m pelican.server' is deprecated.\nThe "
"Pelican development server should be run via "
"'pelican --listen' or 'pelican -l'.\nThis can be combined "
"with regeneration as 'pelican -lr'.\nRerun 'pelican-"
"quickstart' to get new Makefile and tasks.py files.")
logger.warning(
"'python -m pelican.server' is deprecated.\nThe "
"Pelican development server should be run via "
"'pelican --listen' or 'pelican -l'.\nThis can be combined "
"with regeneration as 'pelican -lr'.\nRerun 'pelican-"
"quickstart' to get new Makefile and tasks.py files."
)
args = parse_arguments()
RootedHTTPServer.allow_reuse_address = True
try:
httpd = RootedHTTPServer(
args.path, (args.server, args.port), ComplexHTTPRequestHandler)
args.path, (args.server, args.port), ComplexHTTPRequestHandler
)
if args.ssl:
httpd.socket = ssl.wrap_socket(
httpd.socket, keyfile=args.key,
certfile=args.cert, server_side=True)
httpd.socket, keyfile=args.key, certfile=args.cert, server_side=True
)
except ssl.SSLError as e:
logger.error("Couldn't open certificate file %s or key file %s",
args.cert, args.key)
logger.error("Could not listen on port %s, server %s.",
args.port, args.server)
sys.exit(getattr(e, 'exitcode', 1))
logger.error(
"Couldn't open certificate file %s or key file %s", args.cert, args.key
)
logger.error("Could not listen on port %s, server %s.", args.port, args.server)
sys.exit(getattr(e, "exitcode", 1))
logger.info("Serving at port %s, server %s.",
args.port, args.server)
logger.info("Serving at port %s, server %s.", args.port, args.server)
try:
httpd.serve_forever()
except KeyboardInterrupt:

File diff suppressed because it is too large

View file

@ -1,4 +1,4 @@
raise ImportError(
'Importing from `pelican.signals` is deprecated. '
'Use `from pelican import signals` or `import pelican.plugins.signals` instead.'
"Importing from `pelican.signals` is deprecated. "
"Use `from pelican import signals` or `import pelican.plugins.signals` instead."
)

View file

@ -9,4 +9,4 @@ Used for pelican test
The quick brown fox .
This page is a draft
This page is a draft

View file

@ -3,4 +3,3 @@ author: Alexis Métaireau
Markdown with filename metadata
===============================

View file

@ -5,4 +5,3 @@ Title: Test Markdown extensions
## Level1
### Level2

View file

@ -3,4 +3,3 @@ This is a super article !
#########################
:Category: Yeah

View file

@ -3,4 +3,3 @@ This is an article without category !
#####################################
This article should be in the DEFAULT_CATEGORY.

View file

@ -1064,4 +1064,4 @@
<gd:extendedProperty name="blogger.itemClass" value="pid-944253050"/>
<gd:extendedProperty name="blogger.displayTime" value="29 november 2010 om 12:35"/>
</entry>
</feed>
</feed>

View file

@ -1 +1 @@



View file

@ -52,4 +52,3 @@ quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse
cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non
proident, sunt in culpa qui officia deserunt mollit anim id est laborum.

View file

@ -838,7 +838,7 @@ proident, sunt in culpa qui officia deserunt mollit anim id est laborum.]]></con
<wp:meta_key>_edit_last</wp:meta_key>
<wp:meta_value><![CDATA[3]]></wp:meta_value>
</wp:postmeta>
</item>
</item>
<item>
<title>A 2nd custom post type also in category 5</title>
<link>http://thisisa.test/?p=177</link>

View file

@ -1,43 +1,47 @@
AUTHOR = 'Alexis Métaireau'
AUTHOR = "Alexis Métaireau"
SITENAME = "Alexis' log"
SITEURL = 'http://blog.notmyidea.org'
TIMEZONE = 'UTC'
SITEURL = "http://blog.notmyidea.org"
TIMEZONE = "UTC"
GITHUB_URL = 'http://github.com/ametaireau/'
GITHUB_URL = "http://github.com/ametaireau/"
DISQUS_SITENAME = "blog-notmyidea"
PDF_GENERATOR = False
REVERSE_CATEGORY_ORDER = True
DEFAULT_PAGINATION = 2
FEED_RSS = 'feeds/all.rss.xml'
CATEGORY_FEED_RSS = 'feeds/{slug}.rss.xml'
FEED_RSS = "feeds/all.rss.xml"
CATEGORY_FEED_RSS = "feeds/{slug}.rss.xml"
LINKS = (('Biologeek', 'http://biologeek.org'),
('Filyb', "http://filyb.info/"),
('Libert-fr', "http://www.libert-fr.com"),
('N1k0', "http://prendreuncafe.com/blog/"),
('Tarek Ziadé', "http://ziade.org/blog"),
('Zubin Mithra', "http://zubin71.wordpress.com/"),)
LINKS = (
("Biologeek", "http://biologeek.org"),
("Filyb", "http://filyb.info/"),
("Libert-fr", "http://www.libert-fr.com"),
("N1k0", "http://prendreuncafe.com/blog/"),
("Tarek Ziadé", "http://ziade.org/blog"),
("Zubin Mithra", "http://zubin71.wordpress.com/"),
)
SOCIAL = (('twitter', 'http://twitter.com/ametaireau'),
('lastfm', 'http://lastfm.com/user/akounet'),
('github', 'http://github.com/ametaireau'),)
SOCIAL = (
("twitter", "http://twitter.com/ametaireau"),
("lastfm", "http://lastfm.com/user/akounet"),
("github", "http://github.com/ametaireau"),
)
# global metadata to all the contents
DEFAULT_METADATA = {'yeah': 'it is'}
DEFAULT_METADATA = {"yeah": "it is"}
# path-specific metadata
EXTRA_PATH_METADATA = {
'extra/robots.txt': {'path': 'robots.txt'},
"extra/robots.txt": {"path": "robots.txt"},
}
# static paths will be copied without parsing their contents
STATIC_PATHS = [
'pictures',
'extra/robots.txt',
"pictures",
"extra/robots.txt",
]
FORMATTED_FIELDS = ['summary', 'custom_formatted_field']
FORMATTED_FIELDS = ["summary", "custom_formatted_field"]
# foobar will not be used, because it's not in caps. All configuration keys
# have to be in caps

View file

@ -1,4 +1,4 @@
NAME = 'namespace plugin'
NAME = "namespace plugin"
def register():

View file

@ -16,7 +16,10 @@ from pelican.contents import Article
from pelican.readers import default_metadata
from pelican.settings import DEFAULT_CONFIG
__all__ = ['get_article', 'unittest', ]
__all__ = [
"get_article",
"unittest",
]
@contextmanager
@ -51,7 +54,7 @@ def isplit(s, sep=None):
True
"""
sep, hardsep = r'\s+' if sep is None else re.escape(sep), sep is not None
sep, hardsep = r"\s+" if sep is None else re.escape(sep), sep is not None
exp, pos, length = re.compile(sep), 0, len(s)
while True:
m = exp.search(s, pos)
@ -89,10 +92,8 @@ def mute(returns_output=False):
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
saved_stdout = sys.stdout
sys.stdout = StringIO()
@ -112,7 +113,7 @@ def mute(returns_output=False):
def get_article(title, content, **extra_metadata):
metadata = default_metadata(settings=DEFAULT_CONFIG)
metadata['title'] = title
metadata["title"] = title
if extra_metadata:
metadata.update(extra_metadata)
return Article(content, metadata=metadata)
@ -125,14 +126,14 @@ def skipIfNoExecutable(executable):
and skips the tests if not found (if subprocess raises a `OSError`).
"""
with open(os.devnull, 'w') as fnull:
with open(os.devnull, "w") as fnull:
try:
res = subprocess.call(executable, stdout=fnull, stderr=fnull)
except OSError:
res = None
if res is None:
return unittest.skip('{} executable not found'.format(executable))
return unittest.skip("{} executable not found".format(executable))
return lambda func: func
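
Usage is as a decorator on a test (the executable and test name are illustrative):

from pelican.tests.support import skipIfNoExecutable, unittest

@skipIfNoExecutable(["git", "--version"])
class TestGitDiff(unittest.TestCase):
    def test_diff_runs(self):
        pass
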
@ -164,10 +165,7 @@ def can_symlink():
res = True
try:
with temporary_folder() as f:
os.symlink(
f,
os.path.join(f, 'symlink')
)
os.symlink(f, os.path.join(f, "symlink"))
except OSError:
res = False
return res
@ -186,9 +184,9 @@ def get_settings(**kwargs):
def get_context(settings=None, **kwargs):
context = settings.copy() if settings else {}
context['generated_content'] = {}
context['static_links'] = set()
context['static_content'] = {}
context["generated_content"] = {}
context["static_links"] = set()
context["static_content"] = {}
context.update(kwargs)
return context
@ -200,22 +198,24 @@ class LogCountHandler(BufferingHandler):
super().__init__(capacity)
def count_logs(self, msg=None, level=None):
return len([
rec
for rec
in self.buffer
if (msg is None or re.match(msg, rec.getMessage())) and
(level is None or rec.levelno == level)
])
return len(
[
rec
for rec in self.buffer
if (msg is None or re.match(msg, rec.getMessage()))
and (level is None or rec.levelno == level)
]
)
def count_formatted_logs(self, msg=None, level=None):
return len([
rec
for rec
in self.buffer
if (msg is None or re.search(msg, self.format(rec))) and
(level is None or rec.levelno == level)
])
return len(
[
rec
for rec in self.buffer
if (msg is None or re.search(msg, self.format(rec)))
and (level is None or rec.levelno == level)
]
)
def diff_subproc(first, second):
@ -228,8 +228,16 @@ def diff_subproc(first, second):
>>> didCheckFail = proc.returnCode != 0
"""
return subprocess.Popen(
['git', '--no-pager', 'diff', '--no-ext-diff', '--exit-code',
'-w', first, second],
[
"git",
"--no-pager",
"diff",
"--no-ext-diff",
"--exit-code",
"-w",
first,
second,
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
@ -251,9 +259,12 @@ class LoggedTestCase(unittest.TestCase):
def assertLogCountEqual(self, count=None, msg=None, **kwargs):
actual = self._logcount_handler.count_logs(msg=msg, **kwargs)
self.assertEqual(
actual, count,
msg='expected {} occurrences of {!r}, but found {}'.format(
count, msg, actual))
actual,
count,
msg="expected {} occurrences of {!r}, but found {}".format(
count, msg, actual
),
)
class TestCaseWithCLocale(unittest.TestCase):
@ -261,9 +272,10 @@ class TestCaseWithCLocale(unittest.TestCase):
Use utils.temporary_locale if you want a context manager ("with" statement).
"""
def setUp(self):
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, 'C')
locale.setlocale(locale.LC_ALL, "C")
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.old_locale)

View file

@ -8,31 +8,30 @@ from pelican.tests.support import get_context, get_settings, unittest
CUR_DIR = os.path.dirname(__file__)
CONTENT_DIR = os.path.join(CUR_DIR, 'content')
CONTENT_DIR = os.path.join(CUR_DIR, "content")
class TestCache(unittest.TestCase):
def setUp(self):
self.temp_cache = mkdtemp(prefix='pelican_cache.')
self.temp_cache = mkdtemp(prefix="pelican_cache.")
def tearDown(self):
rmtree(self.temp_cache)
def _get_cache_enabled_settings(self):
settings = get_settings()
settings['CACHE_CONTENT'] = True
settings['LOAD_CONTENT_CACHE'] = True
settings['CACHE_PATH'] = self.temp_cache
settings["CACHE_CONTENT"] = True
settings["LOAD_CONTENT_CACHE"] = True
settings["CACHE_PATH"] = self.temp_cache
return settings
def test_generator_caching(self):
"""Test that cached and uncached content is same in generator level"""
settings = self._get_cache_enabled_settings()
settings['CONTENT_CACHING_LAYER'] = 'generator'
settings['PAGE_PATHS'] = ['TestPages']
settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['READERS'] = {'asc': None}
settings["CONTENT_CACHING_LAYER"] = "generator"
settings["PAGE_PATHS"] = ["TestPages"]
settings["DEFAULT_DATE"] = (1970, 1, 1)
settings["READERS"] = {"asc": None}
context = get_context(settings)
def sorted_titles(items):
@ -40,15 +39,23 @@ class TestCache(unittest.TestCase):
# Articles
generator = ArticlesGenerator(
context=context.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
uncached_articles = sorted_titles(generator.articles)
uncached_drafts = sorted_titles(generator.drafts)
generator = ArticlesGenerator(
context=context.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
cached_articles = sorted_titles(generator.articles)
cached_drafts = sorted_titles(generator.drafts)
@ -58,16 +65,24 @@ class TestCache(unittest.TestCase):
# Pages
generator = PagesGenerator(
context=context.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
uncached_pages = sorted_titles(generator.pages)
uncached_hidden_pages = sorted_titles(generator.hidden_pages)
uncached_draft_pages = sorted_titles(generator.draft_pages)
generator = PagesGenerator(
context=context.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
cached_pages = sorted_titles(generator.pages)
cached_hidden_pages = sorted_titles(generator.hidden_pages)
@ -80,10 +95,10 @@ class TestCache(unittest.TestCase):
def test_reader_caching(self):
"""Test that cached and uncached content is same in reader level"""
settings = self._get_cache_enabled_settings()
settings['CONTENT_CACHING_LAYER'] = 'reader'
settings['PAGE_PATHS'] = ['TestPages']
settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['READERS'] = {'asc': None}
settings["CONTENT_CACHING_LAYER"] = "reader"
settings["PAGE_PATHS"] = ["TestPages"]
settings["DEFAULT_DATE"] = (1970, 1, 1)
settings["READERS"] = {"asc": None}
context = get_context(settings)
def sorted_titles(items):
@ -91,15 +106,23 @@ class TestCache(unittest.TestCase):
# Articles
generator = ArticlesGenerator(
context=context.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
uncached_articles = sorted_titles(generator.articles)
uncached_drafts = sorted_titles(generator.drafts)
generator = ArticlesGenerator(
context=context.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
cached_articles = sorted_titles(generator.articles)
cached_drafts = sorted_titles(generator.drafts)
@ -109,15 +132,23 @@ class TestCache(unittest.TestCase):
# Pages
generator = PagesGenerator(
context=context.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
uncached_pages = sorted_titles(generator.pages)
uncached_hidden_pages = sorted_titles(generator.hidden_pages)
generator = PagesGenerator(
context=context.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
cached_pages = sorted_titles(generator.pages)
cached_hidden_pages = sorted_titles(generator.hidden_pages)
@ -128,20 +159,28 @@ class TestCache(unittest.TestCase):
def test_article_object_caching(self):
"""Test Article objects caching at the generator level"""
settings = self._get_cache_enabled_settings()
settings['CONTENT_CACHING_LAYER'] = 'generator'
settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['READERS'] = {'asc': None}
settings["CONTENT_CACHING_LAYER"] = "generator"
settings["DEFAULT_DATE"] = (1970, 1, 1)
settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = ArticlesGenerator(
context=context.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
self.assertTrue(hasattr(generator, '_cache'))
self.assertTrue(hasattr(generator, "_cache"))
generator = ArticlesGenerator(
context=context.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.readers.read_file = MagicMock()
generator.generate_context()
"""
@@ -158,18 +197,26 @@ class TestCache(unittest.TestCase):
def test_article_reader_content_caching(self):
"""Test raw article content caching at the reader level"""
settings = self._get_cache_enabled_settings()
settings['READERS'] = {'asc': None}
settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = ArticlesGenerator(
context=context.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
self.assertTrue(hasattr(generator.readers, '_cache'))
self.assertTrue(hasattr(generator.readers, "_cache"))
generator = ArticlesGenerator(
context=context.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
readers = generator.readers.readers
for reader in readers.values():
reader.read = MagicMock()
@@ -182,44 +229,58 @@ class TestCache(unittest.TestCase):
used in --ignore-cache or autoreload mode"""
settings = self._get_cache_enabled_settings()
settings['READERS'] = {'asc': None}
settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = ArticlesGenerator(
context=context.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.readers.read_file = MagicMock()
generator.generate_context()
self.assertTrue(hasattr(generator, '_cache_open'))
self.assertTrue(hasattr(generator, "_cache_open"))
orig_call_count = generator.readers.read_file.call_count
settings['LOAD_CONTENT_CACHE'] = False
settings["LOAD_CONTENT_CACHE"] = False
generator = ArticlesGenerator(
context=context.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CONTENT_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.readers.read_file = MagicMock()
generator.generate_context()
self.assertEqual(
generator.readers.read_file.call_count,
orig_call_count)
self.assertEqual(generator.readers.read_file.call_count, orig_call_count)
def test_page_object_caching(self):
"""Test Page objects caching at the generator level"""
settings = self._get_cache_enabled_settings()
settings['CONTENT_CACHING_LAYER'] = 'generator'
settings['PAGE_PATHS'] = ['TestPages']
settings['READERS'] = {'asc': None}
settings["CONTENT_CACHING_LAYER"] = "generator"
settings["PAGE_PATHS"] = ["TestPages"]
settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = PagesGenerator(
context=context.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
self.assertTrue(hasattr(generator, '_cache'))
self.assertTrue(hasattr(generator, "_cache"))
generator = PagesGenerator(
context=context.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.readers.read_file = MagicMock()
generator.generate_context()
"""
@@ -231,19 +292,27 @@ class TestCache(unittest.TestCase):
def test_page_reader_content_caching(self):
"""Test raw page content caching at the reader level"""
settings = self._get_cache_enabled_settings()
settings['PAGE_PATHS'] = ['TestPages']
settings['READERS'] = {'asc': None}
settings["PAGE_PATHS"] = ["TestPages"]
settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = PagesGenerator(
context=context.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.generate_context()
self.assertTrue(hasattr(generator.readers, '_cache'))
self.assertTrue(hasattr(generator.readers, "_cache"))
generator = PagesGenerator(
context=context.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
readers = generator.readers.readers
for reader in readers.values():
reader.read = MagicMock()
@@ -256,24 +325,30 @@ class TestCache(unittest.TestCase):
used in --ignore-cache or autoreload mode"""
settings = self._get_cache_enabled_settings()
settings['PAGE_PATHS'] = ['TestPages']
settings['READERS'] = {'asc': None}
settings["PAGE_PATHS"] = ["TestPages"]
settings["READERS"] = {"asc": None}
context = get_context(settings)
generator = PagesGenerator(
context=context.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.readers.read_file = MagicMock()
generator.generate_context()
self.assertTrue(hasattr(generator, '_cache_open'))
self.assertTrue(hasattr(generator, "_cache_open"))
orig_call_count = generator.readers.read_file.call_count
settings['LOAD_CONTENT_CACHE'] = False
settings["LOAD_CONTENT_CACHE"] = False
generator = PagesGenerator(
context=context.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
context=context.copy(),
settings=settings,
path=CUR_DIR,
theme=settings["THEME"],
output_path=None,
)
generator.readers.read_file = MagicMock()
generator.generate_context()
self.assertEqual(
generator.readers.read_file.call_count,
orig_call_count)
self.assertEqual(generator.readers.read_file.call_count, orig_call_count)
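Side note for readers tracing these cache tests: the behaviour they exercise hangs off a handful of settings. A minimal sketch, with setting names taken from the tests above and illustrative values (not a recommended configuration):

from pelican.settings import read_settings

settings = read_settings(path=None, override={
    "CACHE_CONTENT": True,               # master switch for content caching
    "CONTENT_CACHING_LAYER": "reader",   # cache raw content; "generator" caches built objects
    "LOAD_CONTENT_CACHE": True,          # False skips the cache on read (--ignore-cache)
})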


@@ -5,68 +5,77 @@ from pelican import get_config, parse_arguments
class TestParseOverrides(unittest.TestCase):
def test_flags(self):
for flag in ['-e', '--extra-settings']:
args = parse_arguments([flag, 'k=1'])
self.assertDictEqual(args.overrides, {'k': 1})
for flag in ["-e", "--extra-settings"]:
args = parse_arguments([flag, "k=1"])
self.assertDictEqual(args.overrides, {"k": 1})
def test_parse_multiple_items(self):
args = parse_arguments('-e k1=1 k2=2'.split())
self.assertDictEqual(args.overrides, {'k1': 1, 'k2': 2})
args = parse_arguments("-e k1=1 k2=2".split())
self.assertDictEqual(args.overrides, {"k1": 1, "k2": 2})
def test_parse_valid_json(self):
json_values_python_values_map = {
'""': '',
'null': None,
'"string"': 'string',
'["foo", 12, "4", {}]': ['foo', 12, '4', {}]
'""': "",
"null": None,
'"string"': "string",
'["foo", 12, "4", {}]': ["foo", 12, "4", {}],
}
for k, v in json_values_python_values_map.items():
args = parse_arguments(['-e', 'k=' + k])
self.assertDictEqual(args.overrides, {'k': v})
args = parse_arguments(["-e", "k=" + k])
self.assertDictEqual(args.overrides, {"k": v})
def test_parse_invalid_syntax(self):
invalid_items = ['k= 1', 'k =1', 'k', 'k v']
invalid_items = ["k= 1", "k =1", "k", "k v"]
for item in invalid_items:
with self.assertRaises(ValueError):
parse_arguments(f'-e {item}'.split())
parse_arguments(f"-e {item}".split())
def test_parse_invalid_json(self):
invalid_json = {
'', 'False', 'True', 'None', 'some other string',
'{"foo": bar}', '[foo]'
"",
"False",
"True",
"None",
"some other string",
'{"foo": bar}',
"[foo]",
}
for v in invalid_json:
with self.assertRaises(ValueError):
parse_arguments(['-e ', 'k=' + v])
parse_arguments(["-e ", "k=" + v])
class TestGetConfigFromArgs(unittest.TestCase):
def test_overrides_known_keys(self):
args = parse_arguments([
'-e',
'DELETE_OUTPUT_DIRECTORY=false',
'OUTPUT_RETENTION=["1.txt"]',
'SITENAME="Title"'
])
args = parse_arguments(
[
"-e",
"DELETE_OUTPUT_DIRECTORY=false",
'OUTPUT_RETENTION=["1.txt"]',
'SITENAME="Title"',
]
)
config = get_config(args)
config_must_contain = {
'DELETE_OUTPUT_DIRECTORY': False,
'OUTPUT_RETENTION': ['1.txt'],
'SITENAME': 'Title'
"DELETE_OUTPUT_DIRECTORY": False,
"OUTPUT_RETENTION": ["1.txt"],
"SITENAME": "Title",
}
self.assertDictEqual(config, {**config, **config_must_contain})
def test_overrides_non_default_type(self):
args = parse_arguments([
'-e',
'DISPLAY_PAGES_ON_MENU=123',
'PAGE_TRANSLATION_ID=null',
'TRANSLATION_FEED_RSS_URL="someurl"'
])
args = parse_arguments(
[
"-e",
"DISPLAY_PAGES_ON_MENU=123",
"PAGE_TRANSLATION_ID=null",
'TRANSLATION_FEED_RSS_URL="someurl"',
]
)
config = get_config(args)
config_must_contain = {
'DISPLAY_PAGES_ON_MENU': 123,
'PAGE_TRANSLATION_ID': None,
'TRANSLATION_FEED_RSS_URL': 'someurl'
"DISPLAY_PAGES_ON_MENU": 123,
"PAGE_TRANSLATION_ID": None,
"TRANSLATION_FEED_RSS_URL": "someurl",
}
self.assertDictEqual(config, {**config, **config_must_contain})
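For context, the behaviour pinned down above: each -e/--extra-settings item is KEY=VALUE with the value parsed as JSON, so string values carry their own quotes. A usage sketch mirroring the test data:

from pelican import parse_arguments

args = parse_arguments(["-e", 'SITENAME="Title"', "DELETE_OUTPUT_DIRECTORY=false"])
# args.overrides -> {"SITENAME": "Title", "DELETE_OUTPUT_DIRECTORY": False}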

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -4,26 +4,35 @@ from posixpath import join as posix_join
from unittest.mock import patch
from pelican.settings import DEFAULT_CONFIG
from pelican.tests.support import (mute, skipIfNoExecutable, temporary_folder,
unittest, TestCaseWithCLocale)
from pelican.tools.pelican_import import (blogger2fields, build_header,
build_markdown_header,
decode_wp_content,
download_attachments, fields2pelican,
get_attachments, tumblr2fields,
wp2fields,
)
from pelican.tests.support import (
mute,
skipIfNoExecutable,
temporary_folder,
unittest,
TestCaseWithCLocale,
)
from pelican.tools.pelican_import import (
blogger2fields,
build_header,
build_markdown_header,
decode_wp_content,
download_attachments,
fields2pelican,
get_attachments,
tumblr2fields,
wp2fields,
)
from pelican.utils import path_to_file_url, slugify
CUR_DIR = os.path.abspath(os.path.dirname(__file__))
BLOGGER_XML_SAMPLE = os.path.join(CUR_DIR, 'content', 'bloggerexport.xml')
WORDPRESS_XML_SAMPLE = os.path.join(CUR_DIR, 'content', 'wordpressexport.xml')
WORDPRESS_ENCODED_CONTENT_SAMPLE = os.path.join(CUR_DIR,
'content',
'wordpress_content_encoded')
WORDPRESS_DECODED_CONTENT_SAMPLE = os.path.join(CUR_DIR,
'content',
'wordpress_content_decoded')
BLOGGER_XML_SAMPLE = os.path.join(CUR_DIR, "content", "bloggerexport.xml")
WORDPRESS_XML_SAMPLE = os.path.join(CUR_DIR, "content", "wordpressexport.xml")
WORDPRESS_ENCODED_CONTENT_SAMPLE = os.path.join(
CUR_DIR, "content", "wordpress_content_encoded"
)
WORDPRESS_DECODED_CONTENT_SAMPLE = os.path.join(
CUR_DIR, "content", "wordpress_content_decoded"
)
try:
from bs4 import BeautifulSoup
@@ -36,10 +45,9 @@ except ImportError:
LXML = False
@skipIfNoExecutable(['pandoc', '--version'])
@unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module')
@skipIfNoExecutable(["pandoc", "--version"])
@unittest.skipUnless(BeautifulSoup, "Needs BeautifulSoup module")
class TestBloggerXmlImporter(TestCaseWithCLocale):
def setUp(self):
super().setUp()
self.posts = blogger2fields(BLOGGER_XML_SAMPLE)
@@ -50,16 +58,17 @@ class TestBloggerXmlImporter(TestCaseWithCLocale):
"""
test_posts = list(self.posts)
kinds = {x[8] for x in test_posts}
self.assertEqual({'page', 'article', 'comment'}, kinds)
page_titles = {x[0] for x in test_posts if x[8] == 'page'}
self.assertEqual({'Test page', 'Test page 2'}, page_titles)
article_titles = {x[0] for x in test_posts if x[8] == 'article'}
self.assertEqual({'Black as Egypt\'s Night', 'The Steel Windpipe'},
article_titles)
comment_titles = {x[0] for x in test_posts if x[8] == 'comment'}
self.assertEqual({'Mishka, always a pleasure to read your '
'adventures!...'},
comment_titles)
self.assertEqual({"page", "article", "comment"}, kinds)
page_titles = {x[0] for x in test_posts if x[8] == "page"}
self.assertEqual({"Test page", "Test page 2"}, page_titles)
article_titles = {x[0] for x in test_posts if x[8] == "article"}
self.assertEqual(
{"Black as Egypt's Night", "The Steel Windpipe"}, article_titles
)
comment_titles = {x[0] for x in test_posts if x[8] == "comment"}
self.assertEqual(
{"Mishka, always a pleasure to read your " "adventures!..."}, comment_titles
)
def test_recognise_status_with_correct_filename(self):
"""Check that importerer outputs only statuses 'published' and 'draft',
@@ -67,24 +76,25 @@ class TestBloggerXmlImporter(TestCaseWithCLocale):
"""
test_posts = list(self.posts)
statuses = {x[7] for x in test_posts}
self.assertEqual({'published', 'draft'}, statuses)
self.assertEqual({"published", "draft"}, statuses)
draft_filenames = {x[2] for x in test_posts if x[7] == 'draft'}
draft_filenames = {x[2] for x in test_posts if x[7] == "draft"}
# draft filenames are id-based
self.assertEqual({'page-4386962582497458967',
'post-1276418104709695660'}, draft_filenames)
self.assertEqual(
{"page-4386962582497458967", "post-1276418104709695660"}, draft_filenames
)
published_filenames = {x[2] for x in test_posts if x[7] == 'published'}
published_filenames = {x[2] for x in test_posts if x[7] == "published"}
# published filenames are url-based, except comments
self.assertEqual({'the-steel-windpipe',
'test-page',
'post-5590533389087749201'}, published_filenames)
self.assertEqual(
{"the-steel-windpipe", "test-page", "post-5590533389087749201"},
published_filenames,
)
@skipIfNoExecutable(['pandoc', '--version'])
@unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module')
@skipIfNoExecutable(["pandoc", "--version"])
@unittest.skipUnless(BeautifulSoup, "Needs BeautifulSoup module")
class TestWordpressXmlImporter(TestCaseWithCLocale):
def setUp(self):
super().setUp()
self.posts = wp2fields(WORDPRESS_XML_SAMPLE)
@@ -92,30 +102,49 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):
def test_ignore_empty_posts(self):
self.assertTrue(self.posts)
for (title, content, fname, date, author,
categ, tags, status, kind, format) in self.posts:
for (
title,
content,
fname,
date,
author,
categ,
tags,
status,
kind,
format,
) in self.posts:
self.assertTrue(title.strip())
def test_recognise_page_kind(self):
""" Check that we recognise pages in wordpress, as opposed to posts """
"""Check that we recognise pages in wordpress, as opposed to posts"""
self.assertTrue(self.posts)
# Collect (title, filename, kind) of non-empty posts recognised as page
pages_data = []
for (title, content, fname, date, author,
categ, tags, status, kind, format) in self.posts:
if kind == 'page':
for (
title,
content,
fname,
date,
author,
categ,
tags,
status,
kind,
format,
) in self.posts:
if kind == "page":
pages_data.append((title, fname))
self.assertEqual(2, len(pages_data))
self.assertEqual(('Page', 'contact'), pages_data[0])
self.assertEqual(('Empty Page', 'empty'), pages_data[1])
self.assertEqual(("Page", "contact"), pages_data[0])
self.assertEqual(("Empty Page", "empty"), pages_data[1])
def test_dirpage_directive_for_page_kind(self):
silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Empty Page"), self.posts)
with temporary_folder() as temp:
fname = list(silent_f2p(test_post, 'markdown',
temp, dirpage=True))[0]
self.assertTrue(fname.endswith('pages%sempty.md' % os.path.sep))
fname = list(silent_f2p(test_post, "markdown", temp, dirpage=True))[0]
self.assertTrue(fname.endswith("pages%sempty.md" % os.path.sep))
def test_dircat(self):
silent_f2p = mute(True)(fields2pelican)
@@ -125,14 +154,13 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):
if len(post[5]) > 0: # Has a category
test_posts.append(post)
with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, 'markdown',
temp, dircat=True))
subs = DEFAULT_CONFIG['SLUG_REGEX_SUBSTITUTIONS']
fnames = list(silent_f2p(test_posts, "markdown", temp, dircat=True))
subs = DEFAULT_CONFIG["SLUG_REGEX_SUBSTITUTIONS"]
index = 0
for post in test_posts:
name = post[2]
category = slugify(post[5][0], regex_subs=subs, preserve_case=True)
name += '.md'
name += ".md"
filename = os.path.join(category, name)
out_name = fnames[index]
self.assertTrue(out_name.endswith(filename))
@@ -141,9 +169,19 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):
def test_unless_custom_post_all_items_should_be_pages_or_posts(self):
self.assertTrue(self.posts)
pages_data = []
for (title, content, fname, date, author, categ,
tags, status, kind, format) in self.posts:
if kind == 'page' or kind == 'article':
for (
title,
content,
fname,
date,
author,
categ,
tags,
status,
kind,
format,
) in self.posts:
if kind == "page" or kind == "article":
pass
else:
pages_data.append((title, fname))
@@ -152,40 +190,45 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):
def test_recognise_custom_post_type(self):
self.assertTrue(self.custposts)
cust_data = []
for (title, content, fname, date, author, categ,
tags, status, kind, format) in self.custposts:
if kind == 'article' or kind == 'page':
for (
title,
content,
fname,
date,
author,
categ,
tags,
status,
kind,
format,
) in self.custposts:
if kind == "article" or kind == "page":
pass
else:
cust_data.append((title, kind))
self.assertEqual(3, len(cust_data))
self.assertEqual(("A custom post in category 4", "custom1"), cust_data[0])
self.assertEqual(("A custom post in category 5", "custom1"), cust_data[1])
self.assertEqual(
('A custom post in category 4', 'custom1'),
cust_data[0])
self.assertEqual(
('A custom post in category 5', 'custom1'),
cust_data[1])
self.assertEqual(
('A 2nd custom post type also in category 5', 'custom2'),
cust_data[2])
("A 2nd custom post type also in category 5", "custom2"), cust_data[2]
)
def test_custom_posts_put_in_own_dir(self):
silent_f2p = mute(True)(fields2pelican)
test_posts = []
for post in self.custposts:
# check post kind
if post[8] == 'article' or post[8] == 'page':
if post[8] == "article" or post[8] == "page":
pass
else:
test_posts.append(post)
with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, 'markdown',
temp, wp_custpost=True))
fnames = list(silent_f2p(test_posts, "markdown", temp, wp_custpost=True))
index = 0
for post in test_posts:
name = post[2]
kind = post[8]
name += '.md'
name += ".md"
filename = os.path.join(kind, name)
out_name = fnames[index]
self.assertTrue(out_name.endswith(filename))
@@ -196,20 +239,21 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):
test_posts = []
for post in self.custposts:
# check post kind
if post[8] == 'article' or post[8] == 'page':
if post[8] == "article" or post[8] == "page":
pass
else:
test_posts.append(post)
with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, 'markdown', temp,
wp_custpost=True, dircat=True))
subs = DEFAULT_CONFIG['SLUG_REGEX_SUBSTITUTIONS']
fnames = list(
silent_f2p(test_posts, "markdown", temp, wp_custpost=True, dircat=True)
)
subs = DEFAULT_CONFIG["SLUG_REGEX_SUBSTITUTIONS"]
index = 0
for post in test_posts:
name = post[2]
kind = post[8]
category = slugify(post[5][0], regex_subs=subs, preserve_case=True)
name += '.md'
name += ".md"
filename = os.path.join(kind, category, name)
out_name = fnames[index]
self.assertTrue(out_name.endswith(filename))
@@ -221,16 +265,19 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):
test_posts = []
for post in self.custposts:
# check post kind
if post[8] == 'page':
if post[8] == "page":
test_posts.append(post)
with temporary_folder() as temp:
fnames = list(silent_f2p(test_posts, 'markdown', temp,
wp_custpost=True, dirpage=False))
fnames = list(
silent_f2p(
test_posts, "markdown", temp, wp_custpost=True, dirpage=False
)
)
index = 0
for post in test_posts:
name = post[2]
name += '.md'
filename = os.path.join('pages', name)
name += ".md"
filename = os.path.join("pages", name)
out_name = fnames[index]
self.assertFalse(out_name.endswith(filename))
@@ -238,117 +285,114 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):
test_posts = list(self.posts)
def r(f):
with open(f, encoding='utf-8') as infile:
with open(f, encoding="utf-8") as infile:
return infile.read()
silent_f2p = mute(True)(fields2pelican)
with temporary_folder() as temp:
rst_files = (r(f) for f
in silent_f2p(test_posts, 'markdown', temp))
self.assertTrue(any('<iframe' in rst for rst in rst_files))
rst_files = (r(f) for f
in silent_f2p(test_posts, 'markdown',
temp, strip_raw=True))
self.assertFalse(any('<iframe' in rst for rst in rst_files))
rst_files = (r(f) for f in silent_f2p(test_posts, "markdown", temp))
self.assertTrue(any("<iframe" in rst for rst in rst_files))
rst_files = (
r(f) for f in silent_f2p(test_posts, "markdown", temp, strip_raw=True)
)
self.assertFalse(any("<iframe" in rst for rst in rst_files))
# no effect in rst
rst_files = (r(f) for f in silent_f2p(test_posts, 'rst', temp))
self.assertFalse(any('<iframe' in rst for rst in rst_files))
rst_files = (r(f) for f in silent_f2p(test_posts, 'rst', temp,
strip_raw=True))
self.assertFalse(any('<iframe' in rst for rst in rst_files))
rst_files = (r(f) for f in silent_f2p(test_posts, "rst", temp))
self.assertFalse(any("<iframe" in rst for rst in rst_files))
rst_files = (
r(f) for f in silent_f2p(test_posts, "rst", temp, strip_raw=True)
)
self.assertFalse(any("<iframe" in rst for rst in rst_files))
def test_decode_html_entities_in_titles(self):
test_posts = [post for post
in self.posts if post[2] == 'html-entity-test']
test_posts = [post for post in self.posts if post[2] == "html-entity-test"]
self.assertEqual(len(test_posts), 1)
post = test_posts[0]
title = post[0]
self.assertTrue(title, "A normal post with some <html> entities in "
"the title. You can't miss them.")
self.assertNotIn('&', title)
self.assertTrue(
title,
"A normal post with some <html> entities in "
"the title. You can't miss them.",
)
self.assertNotIn("&", title)
def test_decode_wp_content_returns_empty(self):
""" Check that given an empty string we return an empty string."""
"""Check that given an empty string we return an empty string."""
self.assertEqual(decode_wp_content(""), "")
def test_decode_wp_content(self):
""" Check that we can decode a wordpress content string."""
"""Check that we can decode a wordpress content string."""
with open(WORDPRESS_ENCODED_CONTENT_SAMPLE) as encoded_file:
encoded_content = encoded_file.read()
with open(WORDPRESS_DECODED_CONTENT_SAMPLE) as decoded_file:
decoded_content = decoded_file.read()
self.assertEqual(
decode_wp_content(encoded_content, br=False),
decoded_content)
decode_wp_content(encoded_content, br=False), decoded_content
)
def test_preserve_verbatim_formatting(self):
def r(f):
with open(f, encoding='utf-8') as infile:
with open(f, encoding="utf-8") as infile:
return infile.read()
silent_f2p = mute(True)(fields2pelican)
test_post = filter(
lambda p: p[0].startswith("Code in List"),
self.posts)
with temporary_folder() as temp:
md = [r(f) for f in silent_f2p(test_post, 'markdown', temp)][0]
self.assertTrue(re.search(r'\s+a = \[1, 2, 3\]', md))
self.assertTrue(re.search(r'\s+b = \[4, 5, 6\]', md))
for_line = re.search(r'\s+for i in zip\(a, b\):', md).group(0)
print_line = re.search(r'\s+print i', md).group(0)
self.assertTrue(
for_line.rindex('for') < print_line.rindex('print'))
silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Code in List"), self.posts)
with temporary_folder() as temp:
md = [r(f) for f in silent_f2p(test_post, "markdown", temp)][0]
self.assertTrue(re.search(r"\s+a = \[1, 2, 3\]", md))
self.assertTrue(re.search(r"\s+b = \[4, 5, 6\]", md))
for_line = re.search(r"\s+for i in zip\(a, b\):", md).group(0)
print_line = re.search(r"\s+print i", md).group(0)
self.assertTrue(for_line.rindex("for") < print_line.rindex("print"))
def test_code_in_list(self):
def r(f):
with open(f, encoding='utf-8') as infile:
with open(f, encoding="utf-8") as infile:
return infile.read()
silent_f2p = mute(True)(fields2pelican)
test_post = filter(
lambda p: p[0].startswith("Code in List"),
self.posts)
test_post = filter(lambda p: p[0].startswith("Code in List"), self.posts)
with temporary_folder() as temp:
md = [r(f) for f in silent_f2p(test_post, 'markdown', temp)][0]
sample_line = re.search(r'- This is a code sample', md).group(0)
code_line = re.search(r'\s+a = \[1, 2, 3\]', md).group(0)
self.assertTrue(sample_line.rindex('This') < code_line.rindex('a'))
md = [r(f) for f in silent_f2p(test_post, "markdown", temp)][0]
sample_line = re.search(r"- This is a code sample", md).group(0)
code_line = re.search(r"\s+a = \[1, 2, 3\]", md).group(0)
self.assertTrue(sample_line.rindex("This") < code_line.rindex("a"))
def test_dont_use_smart_quotes(self):
def r(f):
with open(f, encoding='utf-8') as infile:
with open(f, encoding="utf-8") as infile:
return infile.read()
silent_f2p = mute(True)(fields2pelican)
test_post = filter(
lambda p: p[0].startswith("Post with raw data"),
self.posts)
test_post = filter(lambda p: p[0].startswith("Post with raw data"), self.posts)
with temporary_folder() as temp:
md = [r(f) for f in silent_f2p(test_post, 'markdown', temp)][0]
md = [r(f) for f in silent_f2p(test_post, "markdown", temp)][0]
escaped_quotes = re.search(r'\\[\'"“”‘’]', md)
self.assertFalse(escaped_quotes)
def test_convert_caption_to_figure(self):
def r(f):
with open(f, encoding='utf-8') as infile:
with open(f, encoding="utf-8") as infile:
return infile.read()
silent_f2p = mute(True)(fields2pelican)
test_post = filter(
lambda p: p[0].startswith("Caption on image"),
self.posts)
with temporary_folder() as temp:
md = [r(f) for f in silent_f2p(test_post, 'markdown', temp)][0]
caption = re.search(r'\[caption', md)
silent_f2p = mute(True)(fields2pelican)
test_post = filter(lambda p: p[0].startswith("Caption on image"), self.posts)
with temporary_folder() as temp:
md = [r(f) for f in silent_f2p(test_post, "markdown", temp)][0]
caption = re.search(r"\[caption", md)
self.assertFalse(caption)
for occurence in [
'/theme/img/xpelican.png.pagespeed.ic.Rjep0025-y.png',
'/theme/img/xpelican-3.png.pagespeed.ic.m-NAIdRCOM.png',
'/theme/img/xpelican.png.pagespeed.ic.Rjep0025-y.png',
'This is a pelican',
'This also a pelican',
'Yet another pelican',
"/theme/img/xpelican.png.pagespeed.ic.Rjep0025-y.png",
"/theme/img/xpelican-3.png.pagespeed.ic.m-NAIdRCOM.png",
"/theme/img/xpelican.png.pagespeed.ic.Rjep0025-y.png",
"This is a pelican",
"This also a pelican",
"Yet another pelican",
]:
# pandoc 2.x converts into ![text](src)
# pandoc 3.x converts into <figure>src<figcaption>text</figcaption></figure>
@@ -357,70 +401,97 @@ class TestWordpressXmlImporter(TestCaseWithCLocale):
class TestBuildHeader(unittest.TestCase):
def test_build_header(self):
header = build_header('test', None, None, None, None, None)
self.assertEqual(header, 'test\n####\n\n')
header = build_header("test", None, None, None, None, None)
self.assertEqual(header, "test\n####\n\n")
def test_build_header_with_fields(self):
header_data = [
'Test Post',
'2014-11-04',
'Alexis Métaireau',
['Programming'],
['Pelican', 'Python'],
'test-post',
"Test Post",
"2014-11-04",
"Alexis Métaireau",
["Programming"],
["Pelican", "Python"],
"test-post",
]
expected_docutils = '\n'.join([
'Test Post',
'#########',
':date: 2014-11-04',
':author: Alexis Métaireau',
':category: Programming',
':tags: Pelican, Python',
':slug: test-post',
'\n',
])
expected_docutils = "\n".join(
[
"Test Post",
"#########",
":date: 2014-11-04",
":author: Alexis Métaireau",
":category: Programming",
":tags: Pelican, Python",
":slug: test-post",
"\n",
]
)
expected_md = '\n'.join([
'Title: Test Post',
'Date: 2014-11-04',
'Author: Alexis Métaireau',
'Category: Programming',
'Tags: Pelican, Python',
'Slug: test-post',
'\n',
])
expected_md = "\n".join(
[
"Title: Test Post",
"Date: 2014-11-04",
"Author: Alexis Métaireau",
"Category: Programming",
"Tags: Pelican, Python",
"Slug: test-post",
"\n",
]
)
self.assertEqual(build_header(*header_data), expected_docutils)
self.assertEqual(build_markdown_header(*header_data), expected_md)
def test_build_header_with_east_asian_characters(self):
header = build_header('これは広い幅の文字だけで構成されたタイトルです',
None, None, None, None, None)
header = build_header(
"これは広い幅の文字だけで構成されたタイトルです",
None,
None,
None,
None,
None,
)
self.assertEqual(header,
('これは広い幅の文字だけで構成されたタイトルです\n'
'##############################################'
'\n\n'))
def test_galleries_added_to_header(self):
header = build_header('test', None, None, None, None, None,
attachments=['output/test1', 'output/test2'])
self.assertEqual(header, ('test\n####\n'
':attachments: output/test1, '
'output/test2\n\n'))
def test_galleries_added_to_markdown_header(self):
header = build_markdown_header('test', None, None, None, None, None,
attachments=['output/test1',
'output/test2'])
self.assertEqual(
header,
'Title: test\nAttachments: output/test1, output/test2\n\n')
(
"これは広い幅の文字だけで構成されたタイトルです\n"
"##############################################"
"\n\n"
),
)
def test_galleries_added_to_header(self):
header = build_header(
"test",
None,
None,
None,
None,
None,
attachments=["output/test1", "output/test2"],
)
self.assertEqual(
header, ("test\n####\n" ":attachments: output/test1, " "output/test2\n\n")
)
def test_galleries_added_to_markdown_header(self):
header = build_markdown_header(
"test",
None,
None,
None,
None,
None,
attachments=["output/test1", "output/test2"],
)
self.assertEqual(
header, "Title: test\nAttachments: output/test1, output/test2\n\n"
)
@unittest.skipUnless(BeautifulSoup, 'Needs BeautifulSoup module')
@unittest.skipUnless(LXML, 'Needs lxml module')
@unittest.skipUnless(BeautifulSoup, "Needs BeautifulSoup module")
@unittest.skipUnless(LXML, "Needs lxml module")
class TestWordpressXMLAttachements(TestCaseWithCLocale):
def setUp(self):
super().setUp()
@@ -435,38 +506,45 @@ class TestWordpressXMLAttachements(TestCaseWithCLocale):
for post in self.attachments.keys():
if post is None:
expected = {
('https://upload.wikimedia.org/wikipedia/commons/'
'thumb/2/2c/Pelican_lakes_entrance02.jpg/'
'240px-Pelican_lakes_entrance02.jpg')
(
"https://upload.wikimedia.org/wikipedia/commons/"
"thumb/2/2c/Pelican_lakes_entrance02.jpg/"
"240px-Pelican_lakes_entrance02.jpg"
)
}
self.assertEqual(self.attachments[post], expected)
elif post == 'with-excerpt':
expected_invalid = ('http://thisurlisinvalid.notarealdomain/'
'not_an_image.jpg')
expected_pelikan = ('http://en.wikipedia.org/wiki/'
'File:Pelikan_Walvis_Bay.jpg')
self.assertEqual(self.attachments[post],
{expected_invalid, expected_pelikan})
elif post == 'with-tags':
expected_invalid = ('http://thisurlisinvalid.notarealdomain')
elif post == "with-excerpt":
expected_invalid = (
"http://thisurlisinvalid.notarealdomain/" "not_an_image.jpg"
)
expected_pelikan = (
"http://en.wikipedia.org/wiki/" "File:Pelikan_Walvis_Bay.jpg"
)
self.assertEqual(
self.attachments[post], {expected_invalid, expected_pelikan}
)
elif post == "with-tags":
expected_invalid = "http://thisurlisinvalid.notarealdomain"
self.assertEqual(self.attachments[post], {expected_invalid})
else:
self.fail('all attachments should match to a '
'filename or None, {}'
.format(post))
self.fail(
"all attachments should match to a " "filename or None, {}".format(
post
)
)
def test_download_attachments(self):
real_file = os.path.join(CUR_DIR, 'content/article.rst')
real_file = os.path.join(CUR_DIR, "content/article.rst")
good_url = path_to_file_url(real_file)
bad_url = 'http://localhost:1/not_a_file.txt'
bad_url = "http://localhost:1/not_a_file.txt"
silent_da = mute()(download_attachments)
with temporary_folder() as temp:
locations = list(silent_da(temp, [good_url, bad_url]))
self.assertEqual(1, len(locations))
directory = locations[0]
self.assertTrue(
directory.endswith(posix_join('content', 'article.rst')),
directory)
directory.endswith(posix_join("content", "article.rst")), directory
)
class TestTumblrImporter(TestCaseWithCLocale):
@@ -484,32 +562,42 @@ class TestTumblrImporter(TestCaseWithCLocale):
"timestamp": 1573162000,
"format": "html",
"slug": "a-slug",
"tags": [
"economics"
],
"tags": ["economics"],
"state": "published",
"photos": [
{
"caption": "",
"original_size": {
"url": "https://..fccdc2360ba7182a.jpg",
"width": 634,
"height": 789
"height": 789,
},
}]
}
],
}
]
get.side_effect = get_posts
posts = list(tumblr2fields("api_key", "blogname"))
self.assertEqual(
[('Photo',
'<img alt="" src="https://..fccdc2360ba7182a.jpg" />\n',
'2019-11-07-a-slug', '2019-11-07 21:26:40+0000', 'testy', ['photo'],
['economics'], 'published', 'article', 'html')],
[
(
"Photo",
'<img alt="" src="https://..fccdc2360ba7182a.jpg" />\n',
"2019-11-07-a-slug",
"2019-11-07 21:26:40+0000",
"testy",
["photo"],
["economics"],
"published",
"article",
"html",
)
],
posts,
posts)
posts,
)
@patch("pelican.tools.pelican_import._get_tumblr_posts")
def test_video_embed(self, get):
@@ -531,40 +619,39 @@ class TestTumblrImporter(TestCaseWithCLocale):
"source_title": "youtube.com",
"caption": "<p>Caption</p>",
"player": [
{
"width": 250,
"embed_code":
"<iframe>1</iframe>"
},
{
"width": 400,
"embed_code":
"<iframe>2</iframe>"
},
{
"width": 500,
"embed_code":
"<iframe>3</iframe>"
}
{"width": 250, "embed_code": "<iframe>1</iframe>"},
{"width": 400, "embed_code": "<iframe>2</iframe>"},
{"width": 500, "embed_code": "<iframe>3</iframe>"},
],
"video_type": "youtube",
}
]
]
get.side_effect = get_posts
posts = list(tumblr2fields("api_key", "blogname"))
self.assertEqual(
[('youtube.com',
'<p><a href="https://href.li/?'
'https://www.youtube.com/a">via</a></p>\n<p>Caption</p>'
'<iframe>1</iframe>\n'
'<iframe>2</iframe>\n'
'<iframe>3</iframe>\n',
'2017-07-07-the-slug',
'2017-07-07 20:31:41+0000', 'testy', ['video'], [], 'published',
'article', 'html')],
[
(
"youtube.com",
'<p><a href="https://href.li/?'
'https://www.youtube.com/a">via</a></p>\n<p>Caption</p>'
"<iframe>1</iframe>\n"
"<iframe>2</iframe>\n"
"<iframe>3</iframe>\n",
"2017-07-07-the-slug",
"2017-07-07 20:31:41+0000",
"testy",
["video"],
[],
"published",
"article",
"html",
)
],
posts,
posts)
posts,
)
@patch("pelican.tools.pelican_import._get_tumblr_posts")
def test_broken_video_embed(self, get):
@@ -581,42 +668,43 @@ class TestTumblrImporter(TestCaseWithCLocale):
"timestamp": 1471192655,
"state": "published",
"format": "html",
"tags": [
"interviews"
],
"source_url":
"https://href.li/?https://www.youtube.com/watch?v=b",
"tags": ["interviews"],
"source_url": "https://href.li/?https://www.youtube.com/watch?v=b",
"source_title": "youtube.com",
"caption":
"<p>Caption</p>",
"caption": "<p>Caption</p>",
"player": [
{
"width": 250,
# If video is gone, embed_code is False
"embed_code": False
"embed_code": False,
},
{
"width": 400,
"embed_code": False
},
{
"width": 500,
"embed_code": False
}
{"width": 400, "embed_code": False},
{"width": 500, "embed_code": False},
],
"video_type": "youtube",
}
]
get.side_effect = get_posts
posts = list(tumblr2fields("api_key", "blogname"))
self.assertEqual(
[('youtube.com',
'<p><a href="https://href.li/?https://www.youtube.com/watch?'
'v=b">via</a></p>\n<p>Caption</p>'
'<p>(This video isn\'t available anymore.)</p>\n',
'2016-08-14-the-slug',
'2016-08-14 16:37:35+0000', 'testy', ['video'], ['interviews'],
'published', 'article', 'html')],
[
(
"youtube.com",
'<p><a href="https://href.li/?https://www.youtube.com/watch?'
'v=b">via</a></p>\n<p>Caption</p>'
"<p>(This video isn't available anymore.)</p>\n",
"2016-08-14-the-slug",
"2016-08-14 16:37:35+0000",
"testy",
["video"],
["interviews"],
"published",
"article",
"html",
)
],
posts,
posts)
posts,
)
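For orientation, the ten-field tuples unpacked throughout these importer tests have the shape (title, content, filename, date, author, categories, tags, status, kind, markup). A hedged sketch of feeding one to fields2pelican — converting to Markdown requires pandoc, per the skip decorators, and the values and output directory are placeholders:

from pelican.tools.pelican_import import fields2pelican

fields = [(
    "A post",                    # title
    "<p>Body</p>",               # content
    "a-post",                    # filename
    "2019-11-07 21:26:40+0000",  # date
    "testy",                     # author
    ["photo"],                   # categories
    ["economics"],               # tags
    "published",                 # status
    "article",                   # kind
    "html",                      # source markup
)]
for out_path in fields2pelican(fields, "markdown", "/tmp/out"):  # /tmp/out must exist
    print(out_path)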


@@ -35,48 +35,41 @@ class TestLog(unittest.TestCase):
def test_log_filter(self):
def do_logging():
for i in range(5):
self.logger.warning('Log %s', i)
self.logger.warning('Another log %s', i)
self.logger.warning("Log %s", i)
self.logger.warning("Another log %s", i)
# no filter
with self.reset_logger():
do_logging()
self.assertEqual(self.handler.count_logs("Log \\d", logging.WARNING), 5)
self.assertEqual(
self.handler.count_logs('Log \\d', logging.WARNING),
5)
self.assertEqual(
self.handler.count_logs('Another log \\d', logging.WARNING),
5)
self.handler.count_logs("Another log \\d", logging.WARNING), 5
)
# filter by template
with self.reset_logger():
log.LimitFilter._ignore.add((logging.WARNING, 'Log %s'))
log.LimitFilter._ignore.add((logging.WARNING, "Log %s"))
do_logging()
self.assertEqual(self.handler.count_logs("Log \\d", logging.WARNING), 0)
self.assertEqual(
self.handler.count_logs('Log \\d', logging.WARNING),
0)
self.assertEqual(
self.handler.count_logs('Another log \\d', logging.WARNING),
5)
self.handler.count_logs("Another log \\d", logging.WARNING), 5
)
# filter by exact message
with self.reset_logger():
log.LimitFilter._ignore.add((logging.WARNING, 'Log 3'))
log.LimitFilter._ignore.add((logging.WARNING, "Log 3"))
do_logging()
self.assertEqual(self.handler.count_logs("Log \\d", logging.WARNING), 4)
self.assertEqual(
self.handler.count_logs('Log \\d', logging.WARNING),
4)
self.assertEqual(
self.handler.count_logs('Another log \\d', logging.WARNING),
5)
self.handler.count_logs("Another log \\d", logging.WARNING), 5
)
# filter by both
with self.reset_logger():
log.LimitFilter._ignore.add((logging.WARNING, 'Log 3'))
log.LimitFilter._ignore.add((logging.WARNING, 'Another log %s'))
log.LimitFilter._ignore.add((logging.WARNING, "Log 3"))
log.LimitFilter._ignore.add((logging.WARNING, "Another log %s"))
do_logging()
self.assertEqual(self.handler.count_logs("Log \\d", logging.WARNING), 4)
self.assertEqual(
self.handler.count_logs('Log \\d', logging.WARNING),
4)
self.assertEqual(
self.handler.count_logs('Another log \\d', logging.WARNING),
0)
self.handler.count_logs("Another log \\d", logging.WARNING), 0
)
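The contract behind these assertions, sketched: LimitFilter._ignore holds (level, message) pairs, and a message may be either the raw logging template or a fully formatted string:

import logging

from pelican import log

log.LimitFilter._ignore.add((logging.WARNING, "Log %s"))  # drops every record built from this template
log.LimitFilter._ignore.add((logging.WARNING, "Log 3"))   # drops this one exact message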


@@ -17,17 +17,17 @@ class TestPage(unittest.TestCase):
def setUp(self):
super().setUp()
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, 'C')
locale.setlocale(locale.LC_ALL, "C")
self.page_kwargs = {
'content': TEST_CONTENT,
'context': {
'localsiteurl': '',
"content": TEST_CONTENT,
"context": {
"localsiteurl": "",
},
'metadata': {
'summary': TEST_SUMMARY,
'title': 'foo bar',
"metadata": {
"summary": TEST_SUMMARY,
"title": "foo bar",
},
'source_path': '/path/to/file/foo.ext'
"source_path": "/path/to/file/foo.ext",
}
def tearDown(self):
@@ -37,68 +37,79 @@ class TestPage(unittest.TestCase):
settings = get_settings()
# fix up pagination rules
from pelican.paginator import PaginationRule
pagination_rules = [
PaginationRule(*r) for r in settings.get(
'PAGINATION_PATTERNS',
DEFAULT_CONFIG['PAGINATION_PATTERNS'],
PaginationRule(*r)
for r in settings.get(
"PAGINATION_PATTERNS",
DEFAULT_CONFIG["PAGINATION_PATTERNS"],
)
]
settings['PAGINATION_PATTERNS'] = sorted(
settings["PAGINATION_PATTERNS"] = sorted(
pagination_rules,
key=lambda r: r[0],
)
self.page_kwargs['metadata']['author'] = Author('Blogger', settings)
object_list = [Article(**self.page_kwargs),
Article(**self.page_kwargs)]
paginator = Paginator('foobar.foo', 'foobar/foo', object_list,
settings)
self.page_kwargs["metadata"]["author"] = Author("Blogger", settings)
object_list = [Article(**self.page_kwargs), Article(**self.page_kwargs)]
paginator = Paginator("foobar.foo", "foobar/foo", object_list, settings)
page = paginator.page(1)
self.assertEqual(page.save_as, 'foobar.foo')
self.assertEqual(page.save_as, "foobar.foo")
def test_custom_pagination_pattern(self):
from pelican.paginator import PaginationRule
settings = get_settings()
settings['PAGINATION_PATTERNS'] = [PaginationRule(*r) for r in [
(1, '/{url}', '{base_name}/index.html'),
(2, '/{url}{number}/', '{base_name}/{number}/index.html')
]]
self.page_kwargs['metadata']['author'] = Author('Blogger', settings)
object_list = [Article(**self.page_kwargs),
Article(**self.page_kwargs)]
paginator = Paginator('blog/index.html', '//blog.my.site/',
object_list, settings, 1)
settings = get_settings()
settings["PAGINATION_PATTERNS"] = [
PaginationRule(*r)
for r in [
(1, "/{url}", "{base_name}/index.html"),
(2, "/{url}{number}/", "{base_name}/{number}/index.html"),
]
]
self.page_kwargs["metadata"]["author"] = Author("Blogger", settings)
object_list = [Article(**self.page_kwargs), Article(**self.page_kwargs)]
paginator = Paginator(
"blog/index.html", "//blog.my.site/", object_list, settings, 1
)
# The URL *has to* stay absolute (with // in the front), so verify that
page1 = paginator.page(1)
self.assertEqual(page1.save_as, 'blog/index.html')
self.assertEqual(page1.url, '//blog.my.site/')
self.assertEqual(page1.save_as, "blog/index.html")
self.assertEqual(page1.url, "//blog.my.site/")
page2 = paginator.page(2)
self.assertEqual(page2.save_as, 'blog/2/index.html')
self.assertEqual(page2.url, '//blog.my.site/2/')
self.assertEqual(page2.save_as, "blog/2/index.html")
self.assertEqual(page2.url, "//blog.my.site/2/")
def test_custom_pagination_pattern_last_page(self):
from pelican.paginator import PaginationRule
settings = get_settings()
settings['PAGINATION_PATTERNS'] = [PaginationRule(*r) for r in [
(1, '/{url}1/', '{base_name}/1/index.html'),
(2, '/{url}{number}/', '{base_name}/{number}/index.html'),
(-1, '/{url}', '{base_name}/index.html'),
]]
self.page_kwargs['metadata']['author'] = Author('Blogger', settings)
object_list = [Article(**self.page_kwargs),
Article(**self.page_kwargs),
Article(**self.page_kwargs)]
paginator = Paginator('blog/index.html', '//blog.my.site/',
object_list, settings, 1)
settings = get_settings()
settings["PAGINATION_PATTERNS"] = [
PaginationRule(*r)
for r in [
(1, "/{url}1/", "{base_name}/1/index.html"),
(2, "/{url}{number}/", "{base_name}/{number}/index.html"),
(-1, "/{url}", "{base_name}/index.html"),
]
]
self.page_kwargs["metadata"]["author"] = Author("Blogger", settings)
object_list = [
Article(**self.page_kwargs),
Article(**self.page_kwargs),
Article(**self.page_kwargs),
]
paginator = Paginator(
"blog/index.html", "//blog.my.site/", object_list, settings, 1
)
# The URL *has to* stay absolute (with // in the front), so verify that
page1 = paginator.page(1)
self.assertEqual(page1.save_as, 'blog/1/index.html')
self.assertEqual(page1.url, '//blog.my.site/1/')
self.assertEqual(page1.save_as, "blog/1/index.html")
self.assertEqual(page1.url, "//blog.my.site/1/")
page2 = paginator.page(2)
self.assertEqual(page2.save_as, 'blog/2/index.html')
self.assertEqual(page2.url, '//blog.my.site/2/')
self.assertEqual(page2.save_as, "blog/2/index.html")
self.assertEqual(page2.url, "//blog.my.site/2/")
page3 = paginator.page(3)
self.assertEqual(page3.save_as, 'blog/index.html')
self.assertEqual(page3.url, '//blog.my.site/')
self.assertEqual(page3.save_as, "blog/index.html")
self.assertEqual(page3.url, "//blog.my.site/")
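For reference, each PAGINATION_PATTERNS rule is a (min_page, URL template, SAVE_AS template) triple; the rule matching the page number applies, and -1 marks a last-page rule, as these tests verify. The rules from the first custom-pattern test, lifted out as a standalone setting:

PAGINATION_PATTERNS = [
    (1, "/{url}", "{base_name}/index.html"),                     # page 1 keeps the base URL
    (2, "/{url}{number}/", "{base_name}/{number}/index.html"),   # later pages get /N/
]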


@@ -20,9 +20,10 @@ from pelican.tests.support import (
)
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
SAMPLES_PATH = os.path.abspath(os.path.join(
CURRENT_DIR, os.pardir, os.pardir, 'samples'))
OUTPUT_PATH = os.path.abspath(os.path.join(CURRENT_DIR, 'output'))
SAMPLES_PATH = os.path.abspath(
os.path.join(CURRENT_DIR, os.pardir, os.pardir, "samples")
)
OUTPUT_PATH = os.path.abspath(os.path.join(CURRENT_DIR, "output"))
INPUT_PATH = os.path.join(SAMPLES_PATH, "content")
SAMPLE_CONFIG = os.path.join(SAMPLES_PATH, "pelican.conf.py")
@@ -31,9 +32,9 @@ SAMPLE_FR_CONFIG = os.path.join(SAMPLES_PATH, "pelican.conf_FR.py")
def recursiveDiff(dcmp):
diff = {
'diff_files': [os.path.join(dcmp.right, f) for f in dcmp.diff_files],
'left_only': [os.path.join(dcmp.right, f) for f in dcmp.left_only],
'right_only': [os.path.join(dcmp.right, f) for f in dcmp.right_only],
"diff_files": [os.path.join(dcmp.right, f) for f in dcmp.diff_files],
"left_only": [os.path.join(dcmp.right, f) for f in dcmp.left_only],
"right_only": [os.path.join(dcmp.right, f) for f in dcmp.right_only],
}
for sub_dcmp in dcmp.subdirs.values():
for k, v in recursiveDiff(sub_dcmp).items():
@@ -47,11 +48,11 @@ class TestPelican(LoggedTestCase):
def setUp(self):
super().setUp()
self.temp_path = mkdtemp(prefix='pelicantests.')
self.temp_cache = mkdtemp(prefix='pelican_cache.')
self.temp_path = mkdtemp(prefix="pelicantests.")
self.temp_cache = mkdtemp(prefix="pelican_cache.")
self.maxDiff = None
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, 'C')
locale.setlocale(locale.LC_ALL, "C")
def tearDown(self):
read_settings() # cleanup PYGMENTS_RST_OPTIONS
@@ -70,8 +71,8 @@ class TestPelican(LoggedTestCase):
if proc.returncode != 0:
msg = self._formatMessage(
msg,
"%s and %s differ:\nstdout:\n%s\nstderr\n%s" %
(left_path, right_path, out, err)
"%s and %s differ:\nstdout:\n%s\nstderr\n%s"
% (left_path, right_path, out, err),
)
raise self.failureException(msg)
@@ -85,136 +86,154 @@ class TestPelican(LoggedTestCase):
self.assertTrue(
generator_classes[-1] is StaticGenerator,
"StaticGenerator must be the last generator, but it isn't!")
"StaticGenerator must be the last generator, but it isn't!",
)
self.assertIsInstance(
generator_classes, Sequence,
"_get_generator_classes() must return a Sequence to preserve order")
generator_classes,
Sequence,
"_get_generator_classes() must return a Sequence to preserve order",
)
@skipIfNoExecutable(['git', '--version'])
@skipIfNoExecutable(["git", "--version"])
def test_basic_generation_works(self):
# when running pelican without settings, it should pick up the default
# ones and generate correct output without raising any exception
settings = read_settings(path=None, override={
'PATH': INPUT_PATH,
'OUTPUT_PATH': self.temp_path,
'CACHE_PATH': self.temp_cache,
'LOCALE': locale.normalize('en_US'),
})
settings = read_settings(
path=None,
override={
"PATH": INPUT_PATH,
"OUTPUT_PATH": self.temp_path,
"CACHE_PATH": self.temp_cache,
"LOCALE": locale.normalize("en_US"),
},
)
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
self.assertDirsEqual(
self.temp_path, os.path.join(OUTPUT_PATH, 'basic')
)
self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, "basic"))
self.assertLogCountEqual(
count=1,
msg="Unable to find.*skipping url replacement",
level=logging.WARNING)
level=logging.WARNING,
)
@skipIfNoExecutable(['git', '--version'])
@skipIfNoExecutable(["git", "--version"])
def test_custom_generation_works(self):
# the same thing with a specified set of settings should work
settings = read_settings(path=SAMPLE_CONFIG, override={
'PATH': INPUT_PATH,
'OUTPUT_PATH': self.temp_path,
'CACHE_PATH': self.temp_cache,
'LOCALE': locale.normalize('en_US.UTF-8'),
})
settings = read_settings(
path=SAMPLE_CONFIG,
override={
"PATH": INPUT_PATH,
"OUTPUT_PATH": self.temp_path,
"CACHE_PATH": self.temp_cache,
"LOCALE": locale.normalize("en_US.UTF-8"),
},
)
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
self.assertDirsEqual(
self.temp_path, os.path.join(OUTPUT_PATH, 'custom')
)
self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, "custom"))
@skipIfNoExecutable(['git', '--version'])
@unittest.skipUnless(locale_available('fr_FR.UTF-8') or
locale_available('French'), 'French locale needed')
@skipIfNoExecutable(["git", "--version"])
@unittest.skipUnless(
locale_available("fr_FR.UTF-8") or locale_available("French"),
"French locale needed",
)
def test_custom_locale_generation_works(self):
'''Test that generation with fr_FR.UTF-8 locale works'''
if sys.platform == 'win32':
our_locale = 'French'
"""Test that generation with fr_FR.UTF-8 locale works"""
if sys.platform == "win32":
our_locale = "French"
else:
our_locale = 'fr_FR.UTF-8'
our_locale = "fr_FR.UTF-8"
settings = read_settings(path=SAMPLE_FR_CONFIG, override={
'PATH': INPUT_PATH,
'OUTPUT_PATH': self.temp_path,
'CACHE_PATH': self.temp_cache,
'LOCALE': our_locale,
})
settings = read_settings(
path=SAMPLE_FR_CONFIG,
override={
"PATH": INPUT_PATH,
"OUTPUT_PATH": self.temp_path,
"CACHE_PATH": self.temp_cache,
"LOCALE": our_locale,
},
)
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
self.assertDirsEqual(
self.temp_path, os.path.join(OUTPUT_PATH, 'custom_locale')
)
self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, "custom_locale"))
def test_theme_static_paths_copy(self):
# the same thing with a specified set of settings should work
settings = read_settings(path=SAMPLE_CONFIG, override={
'PATH': INPUT_PATH,
'OUTPUT_PATH': self.temp_path,
'CACHE_PATH': self.temp_cache,
'THEME_STATIC_PATHS': [os.path.join(SAMPLES_PATH, 'very'),
os.path.join(SAMPLES_PATH, 'kinda'),
os.path.join(SAMPLES_PATH,
'theme_standard')]
})
settings = read_settings(
path=SAMPLE_CONFIG,
override={
"PATH": INPUT_PATH,
"OUTPUT_PATH": self.temp_path,
"CACHE_PATH": self.temp_cache,
"THEME_STATIC_PATHS": [
os.path.join(SAMPLES_PATH, "very"),
os.path.join(SAMPLES_PATH, "kinda"),
os.path.join(SAMPLES_PATH, "theme_standard"),
],
},
)
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
theme_output = os.path.join(self.temp_path, 'theme')
extra_path = os.path.join(theme_output, 'exciting', 'new', 'files')
theme_output = os.path.join(self.temp_path, "theme")
extra_path = os.path.join(theme_output, "exciting", "new", "files")
for file in ['a_stylesheet', 'a_template']:
for file in ["a_stylesheet", "a_template"]:
self.assertTrue(os.path.exists(os.path.join(theme_output, file)))
for file in ['wow!', 'boom!', 'bap!', 'zap!']:
for file in ["wow!", "boom!", "bap!", "zap!"]:
self.assertTrue(os.path.exists(os.path.join(extra_path, file)))
def test_theme_static_paths_copy_single_file(self):
# the same thing with a specified set of settings should work
settings = read_settings(path=SAMPLE_CONFIG, override={
'PATH': INPUT_PATH,
'OUTPUT_PATH': self.temp_path,
'CACHE_PATH': self.temp_cache,
'THEME_STATIC_PATHS': [os.path.join(SAMPLES_PATH,
'theme_standard')]
})
settings = read_settings(
path=SAMPLE_CONFIG,
override={
"PATH": INPUT_PATH,
"OUTPUT_PATH": self.temp_path,
"CACHE_PATH": self.temp_cache,
"THEME_STATIC_PATHS": [os.path.join(SAMPLES_PATH, "theme_standard")],
},
)
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
theme_output = os.path.join(self.temp_path, 'theme')
theme_output = os.path.join(self.temp_path, "theme")
for file in ['a_stylesheet', 'a_template']:
for file in ["a_stylesheet", "a_template"]:
self.assertTrue(os.path.exists(os.path.join(theme_output, file)))
def test_write_only_selected(self):
"""Test that only the selected files are written"""
settings = read_settings(path=None, override={
'PATH': INPUT_PATH,
'OUTPUT_PATH': self.temp_path,
'CACHE_PATH': self.temp_cache,
'WRITE_SELECTED': [
os.path.join(self.temp_path, 'oh-yeah.html'),
os.path.join(self.temp_path, 'categories.html'),
],
'LOCALE': locale.normalize('en_US'),
})
settings = read_settings(
path=None,
override={
"PATH": INPUT_PATH,
"OUTPUT_PATH": self.temp_path,
"CACHE_PATH": self.temp_cache,
"WRITE_SELECTED": [
os.path.join(self.temp_path, "oh-yeah.html"),
os.path.join(self.temp_path, "categories.html"),
],
"LOCALE": locale.normalize("en_US"),
},
)
pelican = Pelican(settings=settings)
logger = logging.getLogger()
orig_level = logger.getEffectiveLevel()
logger.setLevel(logging.INFO)
mute(True)(pelican.run)()
logger.setLevel(orig_level)
self.assertLogCountEqual(
count=2,
msg="Writing .*",
level=logging.INFO)
self.assertLogCountEqual(count=2, msg="Writing .*", level=logging.INFO)
def test_cyclic_intersite_links_no_warnings(self):
settings = read_settings(path=None, override={
'PATH': os.path.join(CURRENT_DIR, 'cyclic_intersite_links'),
'OUTPUT_PATH': self.temp_path,
'CACHE_PATH': self.temp_cache,
})
settings = read_settings(
path=None,
override={
"PATH": os.path.join(CURRENT_DIR, "cyclic_intersite_links"),
"OUTPUT_PATH": self.temp_path,
"CACHE_PATH": self.temp_cache,
},
)
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
# There are four different intersite links:
@@ -230,41 +249,48 @@ class TestPelican(LoggedTestCase):
self.assertLogCountEqual(
count=1,
msg="Unable to find '.*\\.rst', skipping url replacement.",
level=logging.WARNING)
level=logging.WARNING,
)
def test_md_extensions_deprecation(self):
"""Test that a warning is issued if MD_EXTENSIONS is used"""
settings = read_settings(path=None, override={
'PATH': INPUT_PATH,
'OUTPUT_PATH': self.temp_path,
'CACHE_PATH': self.temp_cache,
'MD_EXTENSIONS': {},
})
settings = read_settings(
path=None,
override={
"PATH": INPUT_PATH,
"OUTPUT_PATH": self.temp_path,
"CACHE_PATH": self.temp_cache,
"MD_EXTENSIONS": {},
},
)
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
self.assertLogCountEqual(
count=1,
msg="MD_EXTENSIONS is deprecated use MARKDOWN instead.",
level=logging.WARNING)
level=logging.WARNING,
)
def test_parse_errors(self):
# Verify that just an error is printed and the application doesn't
# abort, exit or something.
settings = read_settings(path=None, override={
'PATH': os.path.abspath(os.path.join(CURRENT_DIR, 'parse_error')),
'OUTPUT_PATH': self.temp_path,
'CACHE_PATH': self.temp_cache,
})
settings = read_settings(
path=None,
override={
"PATH": os.path.abspath(os.path.join(CURRENT_DIR, "parse_error")),
"OUTPUT_PATH": self.temp_path,
"CACHE_PATH": self.temp_cache,
},
)
pelican = Pelican(settings=settings)
mute(True)(pelican.run)()
self.assertLogCountEqual(
count=1,
msg="Could not process .*parse_error.rst",
level=logging.ERROR)
count=1, msg="Could not process .*parse_error.rst", level=logging.ERROR
)
def test_module_load(self):
"""Test loading via python -m pelican --help displays the help"""
output = subprocess.check_output([
sys.executable, '-m', 'pelican', '--help'
]).decode('ascii', 'replace')
assert 'usage:' in output
output = subprocess.check_output(
[sys.executable, "-m", "pelican", "--help"]
).decode("ascii", "replace")
assert "usage:" in output
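The invocation pattern repeated throughout this test class, reduced to its core (the paths are placeholders):

from pelican import Pelican
from pelican.settings import read_settings

settings = read_settings(path=None, override={
    "PATH": "content",        # input directory, placeholder
    "OUTPUT_PATH": "output",  # output directory, placeholder
})
Pelican(settings=settings).run()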


@@ -2,27 +2,26 @@ import os
from contextlib import contextmanager
import pelican.tests.dummy_plugins.normal_plugin.normal_plugin as normal_plugin
from pelican.plugins._utils import (get_namespace_plugins, get_plugin_name,
load_plugins)
from pelican.plugins._utils import get_namespace_plugins, get_plugin_name, load_plugins
from pelican.tests.support import unittest
@contextmanager
def tmp_namespace_path(path):
'''Context manager for temporarily appending namespace plugin packages
"""Context manager for temporarily appending namespace plugin packages
path: path containing the `pelican` folder
This modifies the `pelican.__path__` and lets the `pelican.plugins`
namespace package resolve it from that.
'''
"""
# This avoids calls to internal `pelican.plugins.__path__._recalculate()`
# as it should not be necessary
import pelican
old_path = pelican.__path__[:]
try:
pelican.__path__.append(os.path.join(path, 'pelican'))
pelican.__path__.append(os.path.join(path, "pelican"))
yield
finally:
pelican.__path__ = old_path
@@ -30,38 +29,38 @@ def tmp_namespace_path(path):
class PluginTest(unittest.TestCase):
_PLUGIN_FOLDER = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'dummy_plugins')
_NS_PLUGIN_FOLDER = os.path.join(_PLUGIN_FOLDER, 'namespace_plugin')
_NORMAL_PLUGIN_FOLDER = os.path.join(_PLUGIN_FOLDER, 'normal_plugin')
os.path.abspath(os.path.dirname(__file__)), "dummy_plugins"
)
_NS_PLUGIN_FOLDER = os.path.join(_PLUGIN_FOLDER, "namespace_plugin")
_NORMAL_PLUGIN_FOLDER = os.path.join(_PLUGIN_FOLDER, "normal_plugin")
def test_namespace_path_modification(self):
import pelican
import pelican.plugins
old_path = pelican.__path__[:]
# not existing path
path = os.path.join(self._PLUGIN_FOLDER, 'foo')
path = os.path.join(self._PLUGIN_FOLDER, "foo")
with tmp_namespace_path(path):
self.assertIn(
os.path.join(path, 'pelican'),
pelican.__path__)
self.assertIn(os.path.join(path, "pelican"), pelican.__path__)
# foo/pelican does not exist, so it won't propagate
self.assertNotIn(
os.path.join(path, 'pelican', 'plugins'),
pelican.plugins.__path__)
os.path.join(path, "pelican", "plugins"), pelican.plugins.__path__
)
# verify that we restored path back
self.assertEqual(pelican.__path__, old_path)
# existing path
with tmp_namespace_path(self._NS_PLUGIN_FOLDER):
self.assertIn(
os.path.join(self._NS_PLUGIN_FOLDER, 'pelican'),
pelican.__path__)
os.path.join(self._NS_PLUGIN_FOLDER, "pelican"), pelican.__path__
)
# /namespace_plugin/pelican exists, so it should be in
self.assertIn(
os.path.join(self._NS_PLUGIN_FOLDER, 'pelican', 'plugins'),
pelican.plugins.__path__)
os.path.join(self._NS_PLUGIN_FOLDER, "pelican", "plugins"),
pelican.plugins.__path__,
)
self.assertEqual(pelican.__path__, old_path)
def test_get_namespace_plugins(self):
@@ -71,11 +70,11 @@ class PluginTest(unittest.TestCase):
# with plugin
with tmp_namespace_path(self._NS_PLUGIN_FOLDER):
ns_plugins = get_namespace_plugins()
self.assertEqual(len(ns_plugins), len(existing_ns_plugins)+1)
self.assertIn('pelican.plugins.ns_plugin', ns_plugins)
self.assertEqual(len(ns_plugins), len(existing_ns_plugins) + 1)
self.assertIn("pelican.plugins.ns_plugin", ns_plugins)
self.assertEqual(
ns_plugins['pelican.plugins.ns_plugin'].NAME,
'namespace plugin')
ns_plugins["pelican.plugins.ns_plugin"].NAME, "namespace plugin"
)
# should be back to existing namespace plugins outside `with`
ns_plugins = get_namespace_plugins()
@@ -91,15 +90,14 @@ class PluginTest(unittest.TestCase):
with tmp_namespace_path(self._NS_PLUGIN_FOLDER):
# with no `PLUGINS` setting, load namespace plugins
plugins = load_plugins({})
self.assertEqual(len(plugins), len(existing_ns_plugins)+1, plugins)
self.assertEqual(len(plugins), len(existing_ns_plugins) + 1, plugins)
self.assertEqual(
{'pelican.plugins.ns_plugin'} | get_plugin_names(existing_ns_plugins),
get_plugin_names(plugins))
{"pelican.plugins.ns_plugin"} | get_plugin_names(existing_ns_plugins),
get_plugin_names(plugins),
)
# disable namespace plugins with `PLUGINS = []`
SETTINGS = {
'PLUGINS': []
}
SETTINGS = {"PLUGINS": []}
plugins = load_plugins(SETTINGS)
self.assertEqual(len(plugins), 0, plugins)
@@ -107,34 +105,35 @@ class PluginTest(unittest.TestCase):
# normal plugin
SETTINGS = {
'PLUGINS': ['normal_plugin'],
'PLUGIN_PATHS': [self._NORMAL_PLUGIN_FOLDER]
"PLUGINS": ["normal_plugin"],
"PLUGIN_PATHS": [self._NORMAL_PLUGIN_FOLDER],
}
plugins = load_plugins(SETTINGS)
self.assertEqual(len(plugins), 1, plugins)
self.assertEqual(
{'normal_plugin'},
get_plugin_names(plugins))
self.assertEqual({"normal_plugin"}, get_plugin_names(plugins))
# normal submodule/subpackage plugins
SETTINGS = {
'PLUGINS': [
'normal_submodule_plugin.subplugin',
'normal_submodule_plugin.subpackage.subpackage',
"PLUGINS": [
"normal_submodule_plugin.subplugin",
"normal_submodule_plugin.subpackage.subpackage",
],
'PLUGIN_PATHS': [self._NORMAL_PLUGIN_FOLDER]
"PLUGIN_PATHS": [self._NORMAL_PLUGIN_FOLDER],
}
plugins = load_plugins(SETTINGS)
self.assertEqual(len(plugins), 2, plugins)
self.assertEqual(
{'normal_submodule_plugin.subplugin',
'normal_submodule_plugin.subpackage.subpackage'},
get_plugin_names(plugins))
{
"normal_submodule_plugin.subplugin",
"normal_submodule_plugin.subpackage.subpackage",
},
get_plugin_names(plugins),
)
# ensure normal plugins are loaded only once
SETTINGS = {
'PLUGINS': ['normal_plugin'],
'PLUGIN_PATHS': [self._NORMAL_PLUGIN_FOLDER],
"PLUGINS": ["normal_plugin"],
"PLUGIN_PATHS": [self._NORMAL_PLUGIN_FOLDER],
}
plugins = load_plugins(SETTINGS)
for plugin in load_plugins(SETTINGS):
@@ -143,40 +142,33 @@ class PluginTest(unittest.TestCase):
self.assertIn(plugin, plugins)
# namespace plugin short
SETTINGS = {
'PLUGINS': ['ns_plugin']
}
SETTINGS = {"PLUGINS": ["ns_plugin"]}
plugins = load_plugins(SETTINGS)
self.assertEqual(len(plugins), 1, plugins)
self.assertEqual(
{'pelican.plugins.ns_plugin'},
get_plugin_names(plugins))
self.assertEqual({"pelican.plugins.ns_plugin"}, get_plugin_names(plugins))
# namespace plugin long
SETTINGS = {
'PLUGINS': ['pelican.plugins.ns_plugin']
}
SETTINGS = {"PLUGINS": ["pelican.plugins.ns_plugin"]}
plugins = load_plugins(SETTINGS)
self.assertEqual(len(plugins), 1, plugins)
self.assertEqual(
{'pelican.plugins.ns_plugin'},
get_plugin_names(plugins))
self.assertEqual({"pelican.plugins.ns_plugin"}, get_plugin_names(plugins))
# normal and namespace plugin
SETTINGS = {
'PLUGINS': ['normal_plugin', 'ns_plugin'],
'PLUGIN_PATHS': [self._NORMAL_PLUGIN_FOLDER]
"PLUGINS": ["normal_plugin", "ns_plugin"],
"PLUGIN_PATHS": [self._NORMAL_PLUGIN_FOLDER],
}
plugins = load_plugins(SETTINGS)
self.assertEqual(len(plugins), 2, plugins)
self.assertEqual(
{'normal_plugin', 'pelican.plugins.ns_plugin'},
get_plugin_names(plugins))
{"normal_plugin", "pelican.plugins.ns_plugin"},
get_plugin_names(plugins),
)
def test_get_plugin_name(self):
self.assertEqual(
get_plugin_name(normal_plugin),
'pelican.tests.dummy_plugins.normal_plugin.normal_plugin',
"pelican.tests.dummy_plugins.normal_plugin.normal_plugin",
)
class NoopPlugin:
@@ -185,7 +177,9 @@ class PluginTest(unittest.TestCase):
self.assertEqual(
get_plugin_name(NoopPlugin),
'PluginTest.test_get_plugin_name.<locals>.NoopPlugin')
"PluginTest.test_get_plugin_name.<locals>.NoopPlugin",
)
self.assertEqual(
get_plugin_name(NoopPlugin()),
'PluginTest.test_get_plugin_name.<locals>.NoopPlugin')
"PluginTest.test_get_plugin_name.<locals>.NoopPlugin",
)
File diff suppressed because it is too large.
@@ -6,11 +6,11 @@ from pelican.tests.support import unittest
class Test_abbr_role(unittest.TestCase):
def call_it(self, text):
from pelican.rstdirectives import abbr_role
rawtext = text
lineno = 42
inliner = Mock(name='inliner')
nodes, system_messages = abbr_role(
'abbr', rawtext, text, lineno, inliner)
inliner = Mock(name="inliner")
nodes, system_messages = abbr_role("abbr", rawtext, text, lineno, inliner)
self.assertEqual(system_messages, [])
self.assertEqual(len(nodes), 1)
return nodes[0]
@@ -18,14 +18,14 @@ class Test_abbr_role(unittest.TestCase):
def test(self):
node = self.call_it("Abbr (Abbreviation)")
self.assertEqual(node.astext(), "Abbr")
self.assertEqual(node['explanation'], "Abbreviation")
self.assertEqual(node["explanation"], "Abbreviation")
def test_newlines_in_explanation(self):
node = self.call_it("CUL (See you\nlater)")
self.assertEqual(node.astext(), "CUL")
self.assertEqual(node['explanation'], "See you\nlater")
self.assertEqual(node["explanation"], "See you\nlater")
def test_newlines_in_abbr(self):
node = self.call_it("US of\nA \n (USA)")
self.assertEqual(node.astext(), "US of\nA")
self.assertEqual(node['explanation'], "USA")
self.assertEqual(node["explanation"], "USA")
@@ -17,10 +17,9 @@ class MockServer:
class TestServer(unittest.TestCase):
def setUp(self):
self.server = MockServer()
self.temp_output = mkdtemp(prefix='pelicantests.')
self.temp_output = mkdtemp(prefix="pelicantests.")
self.old_cwd = os.getcwd()
os.chdir(self.temp_output)
@@ -29,32 +28,33 @@ class TestServer(unittest.TestCase):
rmtree(self.temp_output)
def test_get_path_that_exists(self):
handler = ComplexHTTPRequestHandler(MockRequest(), ('0.0.0.0', 8888),
self.server)
handler = ComplexHTTPRequestHandler(
MockRequest(), ("0.0.0.0", 8888), self.server
)
handler.base_path = self.temp_output
open(os.path.join(self.temp_output, 'foo.html'), 'a').close()
os.mkdir(os.path.join(self.temp_output, 'foo'))
open(os.path.join(self.temp_output, 'foo', 'index.html'), 'a').close()
open(os.path.join(self.temp_output, "foo.html"), "a").close()
os.mkdir(os.path.join(self.temp_output, "foo"))
open(os.path.join(self.temp_output, "foo", "index.html"), "a").close()
os.mkdir(os.path.join(self.temp_output, 'bar'))
open(os.path.join(self.temp_output, 'bar', 'index.html'), 'a').close()
os.mkdir(os.path.join(self.temp_output, "bar"))
open(os.path.join(self.temp_output, "bar", "index.html"), "a").close()
os.mkdir(os.path.join(self.temp_output, 'baz'))
os.mkdir(os.path.join(self.temp_output, "baz"))
for suffix in ['', '/']:
for suffix in ["", "/"]:
# foo.html has precedence over foo/index.html
path = handler.get_path_that_exists('foo' + suffix)
self.assertEqual(path, 'foo.html')
path = handler.get_path_that_exists("foo" + suffix)
self.assertEqual(path, "foo.html")
# folder with index.html should return folder/index.html
path = handler.get_path_that_exists('bar' + suffix)
self.assertEqual(path, 'bar/index.html')
path = handler.get_path_that_exists("bar" + suffix)
self.assertEqual(path, "bar/index.html")
# folder without index.html should return same as input
path = handler.get_path_that_exists('baz' + suffix)
self.assertEqual(path, 'baz' + suffix)
path = handler.get_path_that_exists("baz" + suffix)
self.assertEqual(path, "baz" + suffix)
# not existing path should return None
path = handler.get_path_that_exists('quux' + suffix)
path = handler.get_path_that_exists("quux" + suffix)
self.assertIsNone(path)
@@ -4,10 +4,14 @@ import os
from os.path import abspath, dirname, join
from pelican.settings import (DEFAULT_CONFIG, DEFAULT_THEME,
_printf_s_to_format_field,
configure_settings,
handle_deprecated_settings, read_settings)
from pelican.settings import (
DEFAULT_CONFIG,
DEFAULT_THEME,
_printf_s_to_format_field,
configure_settings,
handle_deprecated_settings,
read_settings,
)
from pelican.tests.support import unittest
@@ -16,40 +20,39 @@ class TestSettingsConfiguration(unittest.TestCase):
append new values to the settings (if any), and apply basic settings
optimizations.
"""
def setUp(self):
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, 'C')
locale.setlocale(locale.LC_ALL, "C")
self.PATH = abspath(dirname(__file__))
default_conf = join(self.PATH, 'default_conf.py')
default_conf = join(self.PATH, "default_conf.py")
self.settings = read_settings(default_conf)
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.old_locale)
def test_overwrite_existing_settings(self):
self.assertEqual(self.settings.get('SITENAME'), "Alexis' log")
self.assertEqual(
self.settings.get('SITEURL'),
'http://blog.notmyidea.org')
self.assertEqual(self.settings.get("SITENAME"), "Alexis' log")
self.assertEqual(self.settings.get("SITEURL"), "http://blog.notmyidea.org")
def test_keep_default_settings(self):
# Keep default settings if not defined.
self.assertEqual(
self.settings.get('DEFAULT_CATEGORY'),
DEFAULT_CONFIG['DEFAULT_CATEGORY'])
self.settings.get("DEFAULT_CATEGORY"), DEFAULT_CONFIG["DEFAULT_CATEGORY"]
)
def test_dont_copy_small_keys(self):
# Do not copy keys not in caps.
self.assertNotIn('foobar', self.settings)
self.assertNotIn("foobar", self.settings)
def test_read_empty_settings(self):
# Ensure an empty settings file results in default settings.
settings = read_settings(None)
expected = copy.deepcopy(DEFAULT_CONFIG)
# Added by configure settings
expected['FEED_DOMAIN'] = ''
expected['ARTICLE_EXCLUDES'] = ['pages']
expected['PAGE_EXCLUDES'] = ['']
expected["FEED_DOMAIN"] = ""
expected["ARTICLE_EXCLUDES"] = ["pages"]
expected["PAGE_EXCLUDES"] = [""]
self.maxDiff = None
self.assertDictEqual(settings, expected)
@@ -57,250 +60,265 @@ class TestSettingsConfiguration(unittest.TestCase):
# Make sure that the results from one settings call don't
# affect past or future instances.
self.PATH = abspath(dirname(__file__))
default_conf = join(self.PATH, 'default_conf.py')
default_conf = join(self.PATH, "default_conf.py")
settings = read_settings(default_conf)
settings['SITEURL'] = 'new-value'
settings["SITEURL"] = "new-value"
new_settings = read_settings(default_conf)
self.assertNotEqual(new_settings['SITEURL'], settings['SITEURL'])
self.assertNotEqual(new_settings["SITEURL"], settings["SITEURL"])
def test_defaults_not_overwritten(self):
# This assumes 'SITENAME': 'A Pelican Blog'
settings = read_settings(None)
settings['SITENAME'] = 'Not a Pelican Blog'
self.assertNotEqual(settings['SITENAME'], DEFAULT_CONFIG['SITENAME'])
settings["SITENAME"] = "Not a Pelican Blog"
self.assertNotEqual(settings["SITENAME"], DEFAULT_CONFIG["SITENAME"])
def test_static_path_settings_safety(self):
# Disallow static paths from being strings
settings = {
'STATIC_PATHS': 'foo/bar',
'THEME_STATIC_PATHS': 'bar/baz',
"STATIC_PATHS": "foo/bar",
"THEME_STATIC_PATHS": "bar/baz",
# These 4 settings are required to run configure_settings
'PATH': '.',
'THEME': DEFAULT_THEME,
'SITEURL': 'http://blog.notmyidea.org/',
'LOCALE': '',
"PATH": ".",
"THEME": DEFAULT_THEME,
"SITEURL": "http://blog.notmyidea.org/",
"LOCALE": "",
}
configure_settings(settings)
self.assertEqual(settings["STATIC_PATHS"], DEFAULT_CONFIG["STATIC_PATHS"])
self.assertEqual(
settings['STATIC_PATHS'],
DEFAULT_CONFIG['STATIC_PATHS'])
self.assertEqual(
settings['THEME_STATIC_PATHS'],
DEFAULT_CONFIG['THEME_STATIC_PATHS'])
settings["THEME_STATIC_PATHS"], DEFAULT_CONFIG["THEME_STATIC_PATHS"]
)
def test_configure_settings(self):
# Manipulations to settings should be applied correctly.
settings = {
'SITEURL': 'http://blog.notmyidea.org/',
'LOCALE': '',
'PATH': os.curdir,
'THEME': DEFAULT_THEME,
"SITEURL": "http://blog.notmyidea.org/",
"LOCALE": "",
"PATH": os.curdir,
"THEME": DEFAULT_THEME,
}
configure_settings(settings)
# SITEURL should not have a trailing slash
self.assertEqual(settings['SITEURL'], 'http://blog.notmyidea.org')
self.assertEqual(settings["SITEURL"], "http://blog.notmyidea.org")
# FEED_DOMAIN, if undefined, should default to SITEURL
self.assertEqual(settings['FEED_DOMAIN'], 'http://blog.notmyidea.org')
self.assertEqual(settings["FEED_DOMAIN"], "http://blog.notmyidea.org")
settings['FEED_DOMAIN'] = 'http://feeds.example.com'
settings["FEED_DOMAIN"] = "http://feeds.example.com"
configure_settings(settings)
self.assertEqual(settings['FEED_DOMAIN'], 'http://feeds.example.com')
self.assertEqual(settings["FEED_DOMAIN"], "http://feeds.example.com")
def test_theme_settings_exceptions(self):
settings = self.settings
# Check that theme lookup in "pelican/themes" functions as expected
settings['THEME'] = os.path.split(settings['THEME'])[1]
settings["THEME"] = os.path.split(settings["THEME"])[1]
configure_settings(settings)
self.assertEqual(settings['THEME'], DEFAULT_THEME)
self.assertEqual(settings["THEME"], DEFAULT_THEME)
# Check that non-existent theme raises exception
settings['THEME'] = 'foo'
settings["THEME"] = "foo"
self.assertRaises(Exception, configure_settings, settings)
def test_deprecated_dir_setting(self):
settings = self.settings
settings['ARTICLE_DIR'] = 'foo'
settings['PAGE_DIR'] = 'bar'
settings["ARTICLE_DIR"] = "foo"
settings["PAGE_DIR"] = "bar"
settings = handle_deprecated_settings(settings)
self.assertEqual(settings['ARTICLE_PATHS'], ['foo'])
self.assertEqual(settings['PAGE_PATHS'], ['bar'])
self.assertEqual(settings["ARTICLE_PATHS"], ["foo"])
self.assertEqual(settings["PAGE_PATHS"], ["bar"])
with self.assertRaises(KeyError):
settings['ARTICLE_DIR']
settings['PAGE_DIR']
settings["ARTICLE_DIR"]
settings["PAGE_DIR"]
def test_default_encoding(self):
# Test that the user locale is set if not specified in settings
locale.setlocale(locale.LC_ALL, 'C')
locale.setlocale(locale.LC_ALL, "C")
# empty string = user system locale
self.assertEqual(self.settings['LOCALE'], [''])
self.assertEqual(self.settings["LOCALE"], [""])
configure_settings(self.settings)
lc_time = locale.getlocale(locale.LC_TIME) # should be set to user locale
# explicitly set locale to user pref and test
locale.setlocale(locale.LC_TIME, '')
locale.setlocale(locale.LC_TIME, "")
self.assertEqual(lc_time, locale.getlocale(locale.LC_TIME))
def test_invalid_settings_throw_exception(self):
# Test that the path name is valid
# test that 'PATH' is set
settings = {
}
settings = {}
self.assertRaises(Exception, configure_settings, settings)
# Test that 'PATH' is valid
settings['PATH'] = ''
settings["PATH"] = ""
self.assertRaises(Exception, configure_settings, settings)
# Test nonexistent THEME
settings['PATH'] = os.curdir
settings['THEME'] = 'foo'
settings["PATH"] = os.curdir
settings["THEME"] = "foo"
self.assertRaises(Exception, configure_settings, settings)
def test__printf_s_to_format_field(self):
for s in ('%s', '{%s}', '{%s'):
option = 'foo/{}/bar.baz'.format(s)
result = _printf_s_to_format_field(option, 'slug')
expected = option % 'qux'
found = result.format(slug='qux')
for s in ("%s", "{%s}", "{%s"):
option = "foo/{}/bar.baz".format(s)
result = _printf_s_to_format_field(option, "slug")
expected = option % "qux"
found = result.format(slug="qux")
self.assertEqual(expected, found)
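The helper under test migrates old printf-style path settings to str.format fields; a sketch of the expected behavior, mirroring the assertions above:

option = "foo/%s/bar.baz"                              # old printf-style value
migrated = _printf_s_to_format_field(option, "slug")   # -> "foo/{slug}/bar.baz"
assert migrated.format(slug="qux") == option % "qux"   # both give "foo/qux/bar.baz"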
def test_deprecated_extra_templates_paths(self):
settings = self.settings
settings['EXTRA_TEMPLATES_PATHS'] = ['/foo/bar', '/ha']
settings["EXTRA_TEMPLATES_PATHS"] = ["/foo/bar", "/ha"]
settings = handle_deprecated_settings(settings)
self.assertEqual(settings['THEME_TEMPLATES_OVERRIDES'],
['/foo/bar', '/ha'])
self.assertNotIn('EXTRA_TEMPLATES_PATHS', settings)
self.assertEqual(settings["THEME_TEMPLATES_OVERRIDES"], ["/foo/bar", "/ha"])
self.assertNotIn("EXTRA_TEMPLATES_PATHS", settings)
def test_deprecated_paginated_direct_templates(self):
settings = self.settings
settings['PAGINATED_DIRECT_TEMPLATES'] = ['index', 'archives']
settings['PAGINATED_TEMPLATES'] = {'index': 10, 'category': None}
settings["PAGINATED_DIRECT_TEMPLATES"] = ["index", "archives"]
settings["PAGINATED_TEMPLATES"] = {"index": 10, "category": None}
settings = handle_deprecated_settings(settings)
self.assertEqual(settings['PAGINATED_TEMPLATES'],
{'index': 10, 'category': None, 'archives': None})
self.assertNotIn('PAGINATED_DIRECT_TEMPLATES', settings)
self.assertEqual(
settings["PAGINATED_TEMPLATES"],
{"index": 10, "category": None, "archives": None},
)
self.assertNotIn("PAGINATED_DIRECT_TEMPLATES", settings)
def test_deprecated_paginated_direct_templates_from_file(self):
# This is equivalent to reading a settings file that has
# PAGINATED_DIRECT_TEMPLATES defined but no PAGINATED_TEMPLATES.
settings = read_settings(None, override={
'PAGINATED_DIRECT_TEMPLATES': ['index', 'archives']
})
self.assertEqual(settings['PAGINATED_TEMPLATES'], {
'archives': None,
'author': None,
'index': None,
'category': None,
'tag': None})
self.assertNotIn('PAGINATED_DIRECT_TEMPLATES', settings)
settings = read_settings(
None, override={"PAGINATED_DIRECT_TEMPLATES": ["index", "archives"]}
)
self.assertEqual(
settings["PAGINATED_TEMPLATES"],
{
"archives": None,
"author": None,
"index": None,
"category": None,
"tag": None,
},
)
self.assertNotIn("PAGINATED_DIRECT_TEMPLATES", settings)
def test_theme_and_extra_templates_exception(self):
settings = self.settings
settings['EXTRA_TEMPLATES_PATHS'] = ['/ha']
settings['THEME_TEMPLATES_OVERRIDES'] = ['/foo/bar']
settings["EXTRA_TEMPLATES_PATHS"] = ["/ha"]
settings["THEME_TEMPLATES_OVERRIDES"] = ["/foo/bar"]
self.assertRaises(Exception, handle_deprecated_settings, settings)
def test_slug_and_slug_regex_substitutions_exception(self):
settings = {}
settings['SLUG_REGEX_SUBSTITUTIONS'] = [('C++', 'cpp')]
settings['TAG_SUBSTITUTIONS'] = [('C#', 'csharp')]
settings["SLUG_REGEX_SUBSTITUTIONS"] = [("C++", "cpp")]
settings["TAG_SUBSTITUTIONS"] = [("C#", "csharp")]
self.assertRaises(Exception, handle_deprecated_settings, settings)
def test_deprecated_slug_substitutions(self):
default_slug_regex_subs = self.settings['SLUG_REGEX_SUBSTITUTIONS']
default_slug_regex_subs = self.settings["SLUG_REGEX_SUBSTITUTIONS"]
# If no deprecated setting is set, don't set new ones
settings = {}
settings = handle_deprecated_settings(settings)
self.assertNotIn('SLUG_REGEX_SUBSTITUTIONS', settings)
self.assertNotIn('TAG_REGEX_SUBSTITUTIONS', settings)
self.assertNotIn('CATEGORY_REGEX_SUBSTITUTIONS', settings)
self.assertNotIn('AUTHOR_REGEX_SUBSTITUTIONS', settings)
self.assertNotIn("SLUG_REGEX_SUBSTITUTIONS", settings)
self.assertNotIn("TAG_REGEX_SUBSTITUTIONS", settings)
self.assertNotIn("CATEGORY_REGEX_SUBSTITUTIONS", settings)
self.assertNotIn("AUTHOR_REGEX_SUBSTITUTIONS", settings)
# If SLUG_SUBSTITUTIONS is set, set {SLUG, AUTHOR}_REGEX_SUBSTITUTIONS
# correctly, don't set {CATEGORY, TAG}_REGEX_SUBSTITUTIONS
settings = {}
settings['SLUG_SUBSTITUTIONS'] = [('C++', 'cpp')]
settings["SLUG_SUBSTITUTIONS"] = [("C++", "cpp")]
settings = handle_deprecated_settings(settings)
self.assertEqual(settings.get('SLUG_REGEX_SUBSTITUTIONS'),
[(r'C\+\+', 'cpp')] + default_slug_regex_subs)
self.assertNotIn('TAG_REGEX_SUBSTITUTIONS', settings)
self.assertNotIn('CATEGORY_REGEX_SUBSTITUTIONS', settings)
self.assertEqual(settings.get('AUTHOR_REGEX_SUBSTITUTIONS'),
default_slug_regex_subs)
self.assertEqual(
settings.get("SLUG_REGEX_SUBSTITUTIONS"),
[(r"C\+\+", "cpp")] + default_slug_regex_subs,
)
self.assertNotIn("TAG_REGEX_SUBSTITUTIONS", settings)
self.assertNotIn("CATEGORY_REGEX_SUBSTITUTIONS", settings)
self.assertEqual(
settings.get("AUTHOR_REGEX_SUBSTITUTIONS"), default_slug_regex_subs
)
# If {CATEGORY, TAG, AUTHOR}_SUBSTITUTIONS are set, set
# {CATEGORY, TAG, AUTHOR}_REGEX_SUBSTITUTIONS correctly, don't set
# SLUG_REGEX_SUBSTITUTIONS
settings = {}
settings['TAG_SUBSTITUTIONS'] = [('C#', 'csharp')]
settings['CATEGORY_SUBSTITUTIONS'] = [('C#', 'csharp')]
settings['AUTHOR_SUBSTITUTIONS'] = [('Alexander Todorov', 'atodorov')]
settings["TAG_SUBSTITUTIONS"] = [("C#", "csharp")]
settings["CATEGORY_SUBSTITUTIONS"] = [("C#", "csharp")]
settings["AUTHOR_SUBSTITUTIONS"] = [("Alexander Todorov", "atodorov")]
settings = handle_deprecated_settings(settings)
self.assertNotIn('SLUG_REGEX_SUBSTITUTIONS', settings)
self.assertEqual(settings['TAG_REGEX_SUBSTITUTIONS'],
[(r'C\#', 'csharp')] + default_slug_regex_subs)
self.assertEqual(settings['CATEGORY_REGEX_SUBSTITUTIONS'],
[(r'C\#', 'csharp')] + default_slug_regex_subs)
self.assertEqual(settings['AUTHOR_REGEX_SUBSTITUTIONS'],
[(r'Alexander\ Todorov', 'atodorov')] +
default_slug_regex_subs)
self.assertNotIn("SLUG_REGEX_SUBSTITUTIONS", settings)
self.assertEqual(
settings["TAG_REGEX_SUBSTITUTIONS"],
[(r"C\#", "csharp")] + default_slug_regex_subs,
)
self.assertEqual(
settings["CATEGORY_REGEX_SUBSTITUTIONS"],
[(r"C\#", "csharp")] + default_slug_regex_subs,
)
self.assertEqual(
settings["AUTHOR_REGEX_SUBSTITUTIONS"],
[(r"Alexander\ Todorov", "atodorov")] + default_slug_regex_subs,
)
# If {SLUG, CATEGORY, TAG, AUTHOR}_SUBSTITUTIONS are set, set
# {SLUG, CATEGORY, TAG, AUTHOR}_REGEX_SUBSTITUTIONS correctly
settings = {}
settings['SLUG_SUBSTITUTIONS'] = [('C++', 'cpp')]
settings['TAG_SUBSTITUTIONS'] = [('C#', 'csharp')]
settings['CATEGORY_SUBSTITUTIONS'] = [('C#', 'csharp')]
settings['AUTHOR_SUBSTITUTIONS'] = [('Alexander Todorov', 'atodorov')]
settings["SLUG_SUBSTITUTIONS"] = [("C++", "cpp")]
settings["TAG_SUBSTITUTIONS"] = [("C#", "csharp")]
settings["CATEGORY_SUBSTITUTIONS"] = [("C#", "csharp")]
settings["AUTHOR_SUBSTITUTIONS"] = [("Alexander Todorov", "atodorov")]
settings = handle_deprecated_settings(settings)
self.assertEqual(settings['TAG_REGEX_SUBSTITUTIONS'],
[(r'C\+\+', 'cpp')] + [(r'C\#', 'csharp')] +
default_slug_regex_subs)
self.assertEqual(settings['CATEGORY_REGEX_SUBSTITUTIONS'],
[(r'C\+\+', 'cpp')] + [(r'C\#', 'csharp')] +
default_slug_regex_subs)
self.assertEqual(settings['AUTHOR_REGEX_SUBSTITUTIONS'],
[(r'Alexander\ Todorov', 'atodorov')] +
default_slug_regex_subs)
self.assertEqual(
settings["TAG_REGEX_SUBSTITUTIONS"],
[(r"C\+\+", "cpp")] + [(r"C\#", "csharp")] + default_slug_regex_subs,
)
self.assertEqual(
settings["CATEGORY_REGEX_SUBSTITUTIONS"],
[(r"C\+\+", "cpp")] + [(r"C\#", "csharp")] + default_slug_regex_subs,
)
self.assertEqual(
settings["AUTHOR_REGEX_SUBSTITUTIONS"],
[(r"Alexander\ Todorov", "atodorov")] + default_slug_regex_subs,
)
# Handle old 'skip' flags correctly
settings = {}
settings['SLUG_SUBSTITUTIONS'] = [('C++', 'cpp', True)]
settings['AUTHOR_SUBSTITUTIONS'] = [('Alexander Todorov', 'atodorov',
False)]
settings["SLUG_SUBSTITUTIONS"] = [("C++", "cpp", True)]
settings["AUTHOR_SUBSTITUTIONS"] = [("Alexander Todorov", "atodorov", False)]
settings = handle_deprecated_settings(settings)
self.assertEqual(settings.get('SLUG_REGEX_SUBSTITUTIONS'),
[(r'C\+\+', 'cpp')] +
[(r'(?u)\A\s*', ''), (r'(?u)\s*\Z', '')])
self.assertEqual(settings['AUTHOR_REGEX_SUBSTITUTIONS'],
[(r'Alexander\ Todorov', 'atodorov')] +
default_slug_regex_subs)
self.assertEqual(
settings.get("SLUG_REGEX_SUBSTITUTIONS"),
[(r"C\+\+", "cpp")] + [(r"(?u)\A\s*", ""), (r"(?u)\s*\Z", "")],
)
self.assertEqual(
settings["AUTHOR_REGEX_SUBSTITUTIONS"],
[(r"Alexander\ Todorov", "atodorov")] + default_slug_regex_subs,
)
def test_deprecated_slug_substitutions_from_file(self):
# This is equivalent to reading a settings file that has
# SLUG_SUBSTITUTIONS defined but no SLUG_REGEX_SUBSTITUTIONS.
settings = read_settings(None, override={
'SLUG_SUBSTITUTIONS': [('C++', 'cpp')]
})
self.assertEqual(settings['SLUG_REGEX_SUBSTITUTIONS'],
[(r'C\+\+', 'cpp')] +
self.settings['SLUG_REGEX_SUBSTITUTIONS'])
self.assertNotIn('SLUG_SUBSTITUTIONS', settings)
settings = read_settings(
None, override={"SLUG_SUBSTITUTIONS": [("C++", "cpp")]}
)
self.assertEqual(
settings["SLUG_REGEX_SUBSTITUTIONS"],
[(r"C\+\+", "cpp")] + self.settings["SLUG_REGEX_SUBSTITUTIONS"],
)
self.assertNotIn("SLUG_SUBSTITUTIONS", settings)
@@ -4,7 +4,6 @@ from pelican.tests.support import unittest
class TestSuiteTest(unittest.TestCase):
def test_error_on_warning(self):
with self.assertRaises(UserWarning):
warnings.warn('test warning')
warnings.warn("test warning")
@@ -5,22 +5,22 @@ from pelican.urlwrappers import Author, Category, Tag, URLWrapper
class TestURLWrapper(unittest.TestCase):
def test_ordering(self):
# URLWrappers are sorted by name
wrapper_a = URLWrapper(name='first', settings={})
wrapper_b = URLWrapper(name='last', settings={})
wrapper_a = URLWrapper(name="first", settings={})
wrapper_b = URLWrapper(name="last", settings={})
self.assertFalse(wrapper_a > wrapper_b)
self.assertFalse(wrapper_a >= wrapper_b)
self.assertFalse(wrapper_a == wrapper_b)
self.assertTrue(wrapper_a != wrapper_b)
self.assertTrue(wrapper_a <= wrapper_b)
self.assertTrue(wrapper_a < wrapper_b)
wrapper_b.name = 'first'
wrapper_b.name = "first"
self.assertFalse(wrapper_a > wrapper_b)
self.assertTrue(wrapper_a >= wrapper_b)
self.assertTrue(wrapper_a == wrapper_b)
self.assertFalse(wrapper_a != wrapper_b)
self.assertTrue(wrapper_a <= wrapper_b)
self.assertFalse(wrapper_a < wrapper_b)
wrapper_a.name = 'last'
wrapper_a.name = "last"
self.assertTrue(wrapper_a > wrapper_b)
self.assertTrue(wrapper_a >= wrapper_b)
self.assertFalse(wrapper_a == wrapper_b)
@@ -29,57 +29,68 @@ class TestURLWrapper(unittest.TestCase):
self.assertFalse(wrapper_a < wrapper_b)
def test_equality(self):
tag = Tag('test', settings={})
cat = Category('test', settings={})
author = Author('test', settings={})
tag = Tag("test", settings={})
cat = Category("test", settings={})
author = Author("test", settings={})
# same name, but different class
self.assertNotEqual(tag, cat)
self.assertNotEqual(tag, author)
# should be equal vs text representing the same name
self.assertEqual(tag, 'test')
self.assertEqual(tag, "test")
# should not be equal vs binary
self.assertNotEqual(tag, b'test')
self.assertNotEqual(tag, b"test")
# Tags describing the same should be equal
tag_equal = Tag('Test', settings={})
tag_equal = Tag("Test", settings={})
self.assertEqual(tag, tag_equal)
# Author describing the same should be equal
author_equal = Author('Test', settings={})
author_equal = Author("Test", settings={})
self.assertEqual(author, author_equal)
cat_ascii = Category('指導書', settings={})
self.assertEqual(cat_ascii, 'zhi dao shu')
cat_ascii = Category("指導書", settings={})
self.assertEqual(cat_ascii, "zhi dao shu")
def test_slugify_with_substitutions_and_dots(self):
tag = Tag('Tag Dot', settings={'TAG_REGEX_SUBSTITUTIONS': [
('Tag Dot', 'tag.dot'),
]})
cat = Category('Category Dot',
settings={'CATEGORY_REGEX_SUBSTITUTIONS': [
('Category Dot', 'cat.dot'),
]})
tag = Tag(
"Tag Dot",
settings={
"TAG_REGEX_SUBSTITUTIONS": [
("Tag Dot", "tag.dot"),
]
},
)
cat = Category(
"Category Dot",
settings={
"CATEGORY_REGEX_SUBSTITUTIONS": [
("Category Dot", "cat.dot"),
]
},
)
self.assertEqual(tag.slug, 'tag.dot')
self.assertEqual(cat.slug, 'cat.dot')
self.assertEqual(tag.slug, "tag.dot")
self.assertEqual(cat.slug, "cat.dot")
def test_author_slug_substitutions(self):
settings = {'AUTHOR_REGEX_SUBSTITUTIONS': [
('Alexander Todorov', 'atodorov'),
('Krasimir Tsonev', 'krasimir'),
(r'[^\w\s-]', ''),
(r'(?u)\A\s*', ''),
(r'(?u)\s*\Z', ''),
(r'[-\s]+', '-'),
]}
settings = {
"AUTHOR_REGEX_SUBSTITUTIONS": [
("Alexander Todorov", "atodorov"),
("Krasimir Tsonev", "krasimir"),
(r"[^\w\s-]", ""),
(r"(?u)\A\s*", ""),
(r"(?u)\s*\Z", ""),
(r"[-\s]+", "-"),
]
}
author1 = Author('Mr. Senko', settings=settings)
author2 = Author('Alexander Todorov', settings=settings)
author3 = Author('Krasimir Tsonev', settings=settings)
author1 = Author("Mr. Senko", settings=settings)
author2 = Author("Alexander Todorov", settings=settings)
author3 = Author("Krasimir Tsonev", settings=settings)
self.assertEqual(author1.slug, 'mr-senko')
self.assertEqual(author2.slug, 'atodorov')
self.assertEqual(author3.slug, 'krasimir')
self.assertEqual(author1.slug, "mr-senko")
self.assertEqual(author2.slug, "atodorov")
self.assertEqual(author3.slug, "krasimir")
File diff suppressed because it is too large.
@@ -49,4 +49,4 @@ del {text-decoration: line-through;}
table {
border-collapse: collapse;
border-spacing: 0;
}
}
@@ -5,4 +5,3 @@
{% block content_title %}
<h1>Articles by {{ author }}</h1>
{% endblock %}
@@ -5,4 +5,3 @@
{% block content_title %}
<h1>Articles in the {{ category }} category</h1>
{% endblock %}
File diff suppressed because it is too large.
@@ -19,6 +19,7 @@ except ImportError:
try:
import tzlocal
if hasattr(tzlocal.get_localzone(), "zone"):
_DEFAULT_TIMEZONE = tzlocal.get_localzone().zone
else:
@@ -28,55 +29,51 @@ except ModuleNotFoundError:
from pelican import __version__
locale.setlocale(locale.LC_ALL, '')
locale.setlocale(locale.LC_ALL, "")
try:
_DEFAULT_LANGUAGE = locale.getlocale()[0]
except ValueError:
# Don't fail on macosx: "unknown locale: UTF-8"
_DEFAULT_LANGUAGE = None
if _DEFAULT_LANGUAGE is None:
_DEFAULT_LANGUAGE = 'en'
_DEFAULT_LANGUAGE = "en"
else:
_DEFAULT_LANGUAGE = _DEFAULT_LANGUAGE.split('_')[0]
_DEFAULT_LANGUAGE = _DEFAULT_LANGUAGE.split("_")[0]
_TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"templates")
_TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates")
_jinja_env = Environment(
loader=FileSystemLoader(_TEMPLATES_DIR),
trim_blocks=True,
)
_GITHUB_PAGES_BRANCHES = {
'personal': 'main',
'project': 'gh-pages'
}
_GITHUB_PAGES_BRANCHES = {"personal": "main", "project": "gh-pages"}
CONF = {
'pelican': 'pelican',
'pelicanopts': '',
'basedir': os.curdir,
'ftp_host': 'localhost',
'ftp_user': 'anonymous',
'ftp_target_dir': '/',
'ssh_host': 'localhost',
'ssh_port': 22,
'ssh_user': 'root',
'ssh_target_dir': '/var/www',
's3_bucket': 'my_s3_bucket',
'cloudfiles_username': 'my_rackspace_username',
'cloudfiles_api_key': 'my_rackspace_api_key',
'cloudfiles_container': 'my_cloudfiles_container',
'dropbox_dir': '~/Dropbox/Public/',
'github_pages_branch': _GITHUB_PAGES_BRANCHES['project'],
'default_pagination': 10,
'siteurl': '',
'lang': _DEFAULT_LANGUAGE,
'timezone': _DEFAULT_TIMEZONE
"pelican": "pelican",
"pelicanopts": "",
"basedir": os.curdir,
"ftp_host": "localhost",
"ftp_user": "anonymous",
"ftp_target_dir": "/",
"ssh_host": "localhost",
"ssh_port": 22,
"ssh_user": "root",
"ssh_target_dir": "/var/www",
"s3_bucket": "my_s3_bucket",
"cloudfiles_username": "my_rackspace_username",
"cloudfiles_api_key": "my_rackspace_api_key",
"cloudfiles_container": "my_cloudfiles_container",
"dropbox_dir": "~/Dropbox/Public/",
"github_pages_branch": _GITHUB_PAGES_BRANCHES["project"],
"default_pagination": 10,
"siteurl": "",
"lang": _DEFAULT_LANGUAGE,
"timezone": _DEFAULT_TIMEZONE,
}
# url for list of valid timezones
_TZ_URL = 'https://en.wikipedia.org/wiki/List_of_tz_database_time_zones'
_TZ_URL = "https://en.wikipedia.org/wiki/List_of_tz_database_time_zones"
# Create a 'marked' default path, to determine if someone has supplied
@@ -90,12 +87,12 @@ _DEFAULT_PATH = _DEFAULT_PATH_TYPE(os.curdir)
def ask(question, answer=str, default=None, length=None):
if answer == str:
r = ''
r = ""
while True:
if default:
r = input('> {} [{}] '.format(question, default))
r = input("> {} [{}] ".format(question, default))
else:
r = input('> {} '.format(question))
r = input("> {} ".format(question))
r = r.strip()
@@ -104,10 +101,10 @@ def ask(question, answer=str, default=None, length=None):
r = default
break
else:
print('You must enter something')
print("You must enter something")
else:
if length and len(r) != length:
print('Entry must be {} characters long'.format(length))
print("Entry must be {} characters long".format(length))
else:
break
@@ -117,18 +114,18 @@ def ask(question, answer=str, default=None, length=None):
r = None
while True:
if default is True:
r = input('> {} (Y/n) '.format(question))
r = input("> {} (Y/n) ".format(question))
elif default is False:
r = input('> {} (y/N) '.format(question))
r = input("> {} (y/N) ".format(question))
else:
r = input('> {} (y/n) '.format(question))
r = input("> {} (y/n) ".format(question))
r = r.strip().lower()
if r in ('y', 'yes'):
if r in ("y", "yes"):
r = True
break
elif r in ('n', 'no'):
elif r in ("n", "no"):
r = False
break
elif not r:
@@ -141,9 +138,9 @@ def ask(question, answer=str, default=None, length=None):
r = None
while True:
if default:
r = input('> {} [{}] '.format(question, default))
r = input("> {} [{}] ".format(question, default))
else:
r = input('> {} '.format(question))
r = input("> {} ".format(question))
r = r.strip()
@@ -155,11 +152,10 @@ def ask(question, answer=str, default=None, length=None):
r = int(r)
break
except ValueError:
print('You must enter an integer')
print("You must enter an integer")
return r
else:
raise NotImplementedError(
'Argument `answer` must be str, bool, or integer')
raise NotImplementedError("Argument `answer` must be str, bool, or integer")
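A brief sketch of how ask() is called in the prompts below (answer values are illustrative):

# str answers loop until non-empty (or fall back to the default);
# bool answers accept y/yes/n/no; int answers must parse as integers.
title = ask("What will be the title of this web site?", answer=str, default="My Blog")
paginate = ask("Do you want to enable article pagination?", answer=bool, default=True)
per_page = ask("How many articles per page do you want?", answer=int, default=10)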
def ask_timezone(question, default, tzurl):
@@ -178,162 +174,227 @@ def ask_timezone(question, default, tzurl):
def render_jinja_template(tmpl_name: str, tmpl_vars: Mapping, target_path: str):
try:
with open(os.path.join(CONF['basedir'], target_path),
'w', encoding='utf-8') as fd:
with open(
os.path.join(CONF["basedir"], target_path), "w", encoding="utf-8"
) as fd:
_template = _jinja_env.get_template(tmpl_name)
fd.write(_template.render(**tmpl_vars))
except OSError as e:
print('Error: {}'.format(e))
print("Error: {}".format(e))
def main():
parser = argparse.ArgumentParser(
description="A kickstarter for Pelican",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-p', '--path', default=_DEFAULT_PATH,
help="The path to generate the blog into")
parser.add_argument('-t', '--title', metavar="title",
help='Set the title of the website')
parser.add_argument('-a', '--author', metavar="author",
help='Set the author name of the website')
parser.add_argument('-l', '--lang', metavar="lang",
help='Set the default web site language')
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"-p", "--path", default=_DEFAULT_PATH, help="The path to generate the blog into"
)
parser.add_argument(
"-t", "--title", metavar="title", help="Set the title of the website"
)
parser.add_argument(
"-a", "--author", metavar="author", help="Set the author name of the website"
)
parser.add_argument(
"-l", "--lang", metavar="lang", help="Set the default web site language"
)
args = parser.parse_args()
print('''Welcome to pelican-quickstart v{v}.
print(
"""Welcome to pelican-quickstart v{v}.
This script will help you create a new Pelican-based website.
Please answer the following questions so this script can generate the files
needed by Pelican.
'''.format(v=__version__))
""".format(v=__version__)
)
project = os.path.join(
os.environ.get('VIRTUAL_ENV', os.curdir), '.project')
no_path_was_specified = hasattr(args.path, 'is_default_path')
project = os.path.join(os.environ.get("VIRTUAL_ENV", os.curdir), ".project")
no_path_was_specified = hasattr(args.path, "is_default_path")
if os.path.isfile(project) and no_path_was_specified:
CONF['basedir'] = open(project).read().rstrip("\n")
print('Using project associated with current virtual environment. '
'Will save to:\n%s\n' % CONF['basedir'])
CONF["basedir"] = open(project).read().rstrip("\n")
print(
"Using project associated with current virtual environment. "
"Will save to:\n%s\n" % CONF["basedir"]
)
else:
CONF['basedir'] = os.path.abspath(os.path.expanduser(
ask('Where do you want to create your new web site?',
answer=str, default=args.path)))
CONF["basedir"] = os.path.abspath(
os.path.expanduser(
ask(
"Where do you want to create your new web site?",
answer=str,
default=args.path,
)
)
)
CONF['sitename'] = ask('What will be the title of this web site?',
answer=str, default=args.title)
CONF['author'] = ask('Who will be the author of this web site?',
answer=str, default=args.author)
CONF['lang'] = ask('What will be the default language of this web site?',
str, args.lang or CONF['lang'], 2)
CONF["sitename"] = ask(
"What will be the title of this web site?", answer=str, default=args.title
)
CONF["author"] = ask(
"Who will be the author of this web site?", answer=str, default=args.author
)
CONF["lang"] = ask(
"What will be the default language of this web site?",
str,
args.lang or CONF["lang"],
2,
)
if ask('Do you want to specify a URL prefix? e.g., https://example.com ',
answer=bool, default=True):
CONF['siteurl'] = ask('What is your URL prefix? (see '
'above example; no trailing slash)',
str, CONF['siteurl'])
if ask(
"Do you want to specify a URL prefix? e.g., https://example.com ",
answer=bool,
default=True,
):
CONF["siteurl"] = ask(
"What is your URL prefix? (see " "above example; no trailing slash)",
str,
CONF["siteurl"],
)
CONF['with_pagination'] = ask('Do you want to enable article pagination?',
bool, bool(CONF['default_pagination']))
CONF["with_pagination"] = ask(
"Do you want to enable article pagination?",
bool,
bool(CONF["default_pagination"]),
)
if CONF['with_pagination']:
CONF['default_pagination'] = ask('How many articles per page '
'do you want?',
int, CONF['default_pagination'])
if CONF["with_pagination"]:
CONF["default_pagination"] = ask(
"How many articles per page " "do you want?",
int,
CONF["default_pagination"],
)
else:
CONF['default_pagination'] = False
CONF["default_pagination"] = False
CONF['timezone'] = ask_timezone('What is your time zone?',
CONF['timezone'], _TZ_URL)
CONF["timezone"] = ask_timezone(
"What is your time zone?", CONF["timezone"], _TZ_URL
)
automation = ask('Do you want to generate a tasks.py/Makefile '
'to automate generation and publishing?', bool, True)
automation = ask(
"Do you want to generate a tasks.py/Makefile "
"to automate generation and publishing?",
bool,
True,
)
if automation:
if ask('Do you want to upload your website using FTP?',
answer=bool, default=False):
CONF['ftp'] = True,
CONF['ftp_host'] = ask('What is the hostname of your FTP server?',
str, CONF['ftp_host'])
CONF['ftp_user'] = ask('What is your username on that server?',
str, CONF['ftp_user'])
CONF['ftp_target_dir'] = ask('Where do you want to put your '
'web site on that server?',
str, CONF['ftp_target_dir'])
if ask('Do you want to upload your website using SSH?',
answer=bool, default=False):
CONF['ssh'] = True,
CONF['ssh_host'] = ask('What is the hostname of your SSH server?',
str, CONF['ssh_host'])
CONF['ssh_port'] = ask('What is the port of your SSH server?',
int, CONF['ssh_port'])
CONF['ssh_user'] = ask('What is your username on that server?',
str, CONF['ssh_user'])
CONF['ssh_target_dir'] = ask('Where do you want to put your '
'web site on that server?',
str, CONF['ssh_target_dir'])
if ask(
"Do you want to upload your website using FTP?", answer=bool, default=False
):
CONF["ftp"] = (True,)
CONF["ftp_host"] = ask(
"What is the hostname of your FTP server?", str, CONF["ftp_host"]
)
CONF["ftp_user"] = ask(
"What is your username on that server?", str, CONF["ftp_user"]
)
CONF["ftp_target_dir"] = ask(
"Where do you want to put your " "web site on that server?",
str,
CONF["ftp_target_dir"],
)
if ask(
"Do you want to upload your website using SSH?", answer=bool, default=False
):
CONF["ssh"] = (True,)
CONF["ssh_host"] = ask(
"What is the hostname of your SSH server?", str, CONF["ssh_host"]
)
CONF["ssh_port"] = ask(
"What is the port of your SSH server?", int, CONF["ssh_port"]
)
CONF["ssh_user"] = ask(
"What is your username on that server?", str, CONF["ssh_user"]
)
CONF["ssh_target_dir"] = ask(
"Where do you want to put your " "web site on that server?",
str,
CONF["ssh_target_dir"],
)
if ask('Do you want to upload your website using Dropbox?',
answer=bool, default=False):
CONF['dropbox'] = True,
CONF['dropbox_dir'] = ask('Where is your Dropbox directory?',
str, CONF['dropbox_dir'])
if ask(
"Do you want to upload your website using Dropbox?",
answer=bool,
default=False,
):
CONF["dropbox"] = (True,)
CONF["dropbox_dir"] = ask(
"Where is your Dropbox directory?", str, CONF["dropbox_dir"]
)
if ask('Do you want to upload your website using S3?',
answer=bool, default=False):
CONF['s3'] = True,
CONF['s3_bucket'] = ask('What is the name of your S3 bucket?',
str, CONF['s3_bucket'])
if ask(
"Do you want to upload your website using S3?", answer=bool, default=False
):
CONF["s3"] = (True,)
CONF["s3_bucket"] = ask(
"What is the name of your S3 bucket?", str, CONF["s3_bucket"]
)
if ask('Do you want to upload your website using '
'Rackspace Cloud Files?', answer=bool, default=False):
CONF['cloudfiles'] = True,
CONF['cloudfiles_username'] = ask('What is your Rackspace '
'Cloud username?', str,
CONF['cloudfiles_username'])
CONF['cloudfiles_api_key'] = ask('What is your Rackspace '
'Cloud API key?', str,
CONF['cloudfiles_api_key'])
CONF['cloudfiles_container'] = ask('What is the name of your '
'Cloud Files container?',
str,
CONF['cloudfiles_container'])
if ask(
"Do you want to upload your website using " "Rackspace Cloud Files?",
answer=bool,
default=False,
):
CONF["cloudfiles"] = (True,)
CONF["cloudfiles_username"] = ask(
"What is your Rackspace " "Cloud username?",
str,
CONF["cloudfiles_username"],
)
CONF["cloudfiles_api_key"] = ask(
"What is your Rackspace " "Cloud API key?",
str,
CONF["cloudfiles_api_key"],
)
CONF["cloudfiles_container"] = ask(
"What is the name of your " "Cloud Files container?",
str,
CONF["cloudfiles_container"],
)
if ask('Do you want to upload your website using GitHub Pages?',
answer=bool, default=False):
CONF['github'] = True,
if ask('Is this your personal page (username.github.io)?',
answer=bool, default=False):
CONF['github_pages_branch'] = \
_GITHUB_PAGES_BRANCHES['personal']
if ask(
"Do you want to upload your website using GitHub Pages?",
answer=bool,
default=False,
):
CONF["github"] = (True,)
if ask(
"Is this your personal page (username.github.io)?",
answer=bool,
default=False,
):
CONF["github_pages_branch"] = _GITHUB_PAGES_BRANCHES["personal"]
else:
CONF['github_pages_branch'] = \
_GITHUB_PAGES_BRANCHES['project']
CONF["github_pages_branch"] = _GITHUB_PAGES_BRANCHES["project"]
try:
os.makedirs(os.path.join(CONF['basedir'], 'content'))
os.makedirs(os.path.join(CONF["basedir"], "content"))
except OSError as e:
print('Error: {}'.format(e))
print("Error: {}".format(e))
try:
os.makedirs(os.path.join(CONF['basedir'], 'output'))
os.makedirs(os.path.join(CONF["basedir"], "output"))
except OSError as e:
print('Error: {}'.format(e))
print("Error: {}".format(e))
conf_python = dict()
for key, value in CONF.items():
conf_python[key] = repr(value)
render_jinja_template('pelicanconf.py.jinja2', conf_python, 'pelicanconf.py')
render_jinja_template("pelicanconf.py.jinja2", conf_python, "pelicanconf.py")
render_jinja_template('publishconf.py.jinja2', CONF, 'publishconf.py')
render_jinja_template("publishconf.py.jinja2", CONF, "publishconf.py")
if automation:
render_jinja_template('tasks.py.jinja2', CONF, 'tasks.py')
render_jinja_template('Makefile.jinja2', CONF, 'Makefile')
render_jinja_template("tasks.py.jinja2", CONF, "tasks.py")
render_jinja_template("Makefile.jinja2", CONF, "Makefile")
print('Done. Your new project is available at %s' % CONF['basedir'])
print("Done. Your new project is available at %s" % CONF["basedir"])
if __name__ == "__main__":
@@ -8,7 +8,7 @@ import sys
def err(msg, die=None):
"""Print an error message and exits if an exit code is given"""
sys.stderr.write(msg + '\n')
sys.stderr.write(msg + "\n")
if die:
sys.exit(die if isinstance(die, int) else 1)
@@ -16,62 +16,96 @@ def err(msg, die=None):
try:
import pelican
except ImportError:
err('Cannot import pelican.\nYou must '
'install Pelican in order to run this script.',
-1)
err(
"Cannot import pelican.\nYou must "
"install Pelican in order to run this script.",
-1,
)
global _THEMES_PATH
_THEMES_PATH = os.path.join(
os.path.dirname(
os.path.abspath(pelican.__file__)
),
'themes'
os.path.dirname(os.path.abspath(pelican.__file__)), "themes"
)
__version__ = '0.2'
_BUILTIN_THEMES = ['simple', 'notmyidea']
__version__ = "0.2"
_BUILTIN_THEMES = ["simple", "notmyidea"]
def main():
"""Main function"""
parser = argparse.ArgumentParser(
description="""Install themes for Pelican""")
parser = argparse.ArgumentParser(description="""Install themes for Pelican""")
excl = parser.add_mutually_exclusive_group()
excl.add_argument(
'-l', '--list', dest='action', action="store_const", const='list',
help="Show the themes already installed and exit")
"-l",
"--list",
dest="action",
action="store_const",
const="list",
help="Show the themes already installed and exit",
)
excl.add_argument(
'-p', '--path', dest='action', action="store_const", const='path',
help="Show the themes path and exit")
"-p",
"--path",
dest="action",
action="store_const",
const="path",
help="Show the themes path and exit",
)
excl.add_argument(
'-V', '--version', action='version',
version='pelican-themes v{}'.format(__version__),
help='Print the version of this script')
"-V",
"--version",
action="version",
version="pelican-themes v{}".format(__version__),
help="Print the version of this script",
)
parser.add_argument(
'-i', '--install', dest='to_install', nargs='+', metavar="theme path",
help='The themes to install')
"-i",
"--install",
dest="to_install",
nargs="+",
metavar="theme path",
help="The themes to install",
)
parser.add_argument(
'-r', '--remove', dest='to_remove', nargs='+', metavar="theme name",
help='The themes to remove')
"-r",
"--remove",
dest="to_remove",
nargs="+",
metavar="theme name",
help="The themes to remove",
)
parser.add_argument(
'-U', '--upgrade', dest='to_upgrade', nargs='+',
metavar="theme path", help='The themes to upgrade')
"-U",
"--upgrade",
dest="to_upgrade",
nargs="+",
metavar="theme path",
help="The themes to upgrade",
)
parser.add_argument(
'-s', '--symlink', dest='to_symlink', nargs='+', metavar="theme path",
"-s",
"--symlink",
dest="to_symlink",
nargs="+",
metavar="theme path",
help="Same as `--install', but create a symbolic link instead of "
"copying the theme. Useful for theme development")
"copying the theme. Useful for theme development",
)
parser.add_argument(
'-c', '--clean', dest='clean', action="store_true",
help="Remove the broken symbolic links of the theme path")
"-c",
"--clean",
dest="clean",
action="store_true",
help="Remove the broken symbolic links of the theme path",
)
parser.add_argument(
'-v', '--verbose', dest='verbose',
action="store_true",
help="Verbose output")
"-v", "--verbose", dest="verbose", action="store_true", help="Verbose output"
)
args = parser.parse_args()
@@ -79,46 +113,46 @@ def main():
to_sym = args.to_symlink or args.clean
if args.action:
if args.action == 'list':
if args.action == "list":
list_themes(args.verbose)
elif args.action == 'path':
elif args.action == "path":
print(_THEMES_PATH)
elif to_install or args.to_remove or to_sym:
if args.to_remove:
if args.verbose:
print('Removing themes...')
print("Removing themes...")
for i in args.to_remove:
remove(i, v=args.verbose)
if args.to_install:
if args.verbose:
print('Installing themes...')
print("Installing themes...")
for i in args.to_install:
install(i, v=args.verbose)
if args.to_upgrade:
if args.verbose:
print('Upgrading themes...')
print("Upgrading themes...")
for i in args.to_upgrade:
install(i, v=args.verbose, u=True)
if args.to_symlink:
if args.verbose:
print('Linking themes...')
print("Linking themes...")
for i in args.to_symlink:
symlink(i, v=args.verbose)
if args.clean:
if args.verbose:
print('Cleaning the themes directory...')
print("Cleaning the themes directory...")
clean(v=args.verbose)
else:
print('No argument given... exiting.')
print("No argument given... exiting.")
def themes():
@@ -142,7 +176,7 @@ def list_themes(v=False):
if v:
print(theme_path + (" (symbolic link to `" + link_target + "')"))
else:
print(theme_path + '@')
print(theme_path + "@")
else:
print(theme_path)
@@ -150,51 +184,52 @@ def list_themes(v=False):
def remove(theme_name, v=False):
"""Removes a theme"""
theme_name = theme_name.replace('/', '')
theme_name = theme_name.replace("/", "")
target = os.path.join(_THEMES_PATH, theme_name)
if theme_name in _BUILTIN_THEMES:
err(theme_name + ' is a builtin theme.\n'
'You cannot remove a builtin theme with this script, '
'remove it by hand if you want.')
err(
theme_name + " is a builtin theme.\n"
"You cannot remove a builtin theme with this script, "
"remove it by hand if you want."
)
elif os.path.islink(target):
if v:
print('Removing link `' + target + "'")
print("Removing link `" + target + "'")
os.remove(target)
elif os.path.isdir(target):
if v:
print('Removing directory `' + target + "'")
print("Removing directory `" + target + "'")
shutil.rmtree(target)
elif os.path.exists(target):
err(target + ' : not a valid theme')
err(target + " : not a valid theme")
else:
err(target + ' : no such file or directory')
err(target + " : no such file or directory")
def install(path, v=False, u=False):
"""Installs a theme"""
if not os.path.exists(path):
err(path + ' : no such file or directory')
err(path + " : no such file or directory")
elif not os.path.isdir(path):
err(path + ' : not a directory')
err(path + " : not a directory")
else:
theme_name = os.path.basename(os.path.normpath(path))
theme_path = os.path.join(_THEMES_PATH, theme_name)
exists = os.path.exists(theme_path)
if exists and not u:
err(path + ' : already exists')
err(path + " : already exists")
elif exists:
remove(theme_name, v)
install(path, v)
else:
if v:
print("Copying '{p}' to '{t}' ...".format(p=path,
t=theme_path))
print("Copying '{p}' to '{t}' ...".format(p=path, t=theme_path))
try:
shutil.copytree(path, theme_path)
try:
if os.name == 'posix':
if os.name == "posix":
for root, dirs, files in os.walk(theme_path):
for d in dirs:
dname = os.path.join(root, d)
@@ -203,35 +238,41 @@ def install(path, v=False, u=False):
fname = os.path.join(root, f)
os.chmod(fname, 420) # 0o644
except OSError as e:
err("Cannot change permissions of files "
"or directory in `{r}':\n{e}".format(r=theme_path,
e=str(e)),
die=False)
err(
"Cannot change permissions of files "
"or directory in `{r}':\n{e}".format(r=theme_path, e=str(e)),
die=False,
)
except Exception as e:
err("Cannot copy `{p}' to `{t}':\n{e}".format(
p=path, t=theme_path, e=str(e)))
err(
"Cannot copy `{p}' to `{t}':\n{e}".format(
p=path, t=theme_path, e=str(e)
)
)
def symlink(path, v=False):
"""Symbolically link a theme"""
if not os.path.exists(path):
err(path + ' : no such file or directory')
err(path + " : no such file or directory")
elif not os.path.isdir(path):
err(path + ' : not a directory')
err(path + " : not a directory")
else:
theme_name = os.path.basename(os.path.normpath(path))
theme_path = os.path.join(_THEMES_PATH, theme_name)
if os.path.exists(theme_path):
err(path + ' : already exists')
err(path + " : already exists")
else:
if v:
print("Linking `{p}' to `{t}' ...".format(
p=path, t=theme_path))
print("Linking `{p}' to `{t}' ...".format(p=path, t=theme_path))
try:
os.symlink(path, theme_path)
except Exception as e:
err("Cannot link `{p}' to `{t}':\n{e}".format(
p=path, t=theme_path, e=str(e)))
err(
"Cannot link `{p}' to `{t}':\n{e}".format(
p=path, t=theme_path, e=str(e)
)
)
def is_broken_link(path):
@@ -247,11 +288,11 @@ def clean(v=False):
path = os.path.join(_THEMES_PATH, path)
if os.path.islink(path) and is_broken_link(path):
if v:
print('Removing {}'.format(path))
print("Removing {}".format(path))
try:
os.remove(path)
except OSError:
print('Error: cannot remove {}'.format(path))
print("Error: cannot remove {}".format(path))
else:
c += 1
@@ -31,17 +31,16 @@ class URLWrapper:
@property
def slug(self):
if self._slug is None:
class_key = '{}_REGEX_SUBSTITUTIONS'.format(
self.__class__.__name__.upper())
class_key = "{}_REGEX_SUBSTITUTIONS".format(self.__class__.__name__.upper())
regex_subs = self.settings.get(
class_key,
self.settings.get('SLUG_REGEX_SUBSTITUTIONS', []))
preserve_case = self.settings.get('SLUGIFY_PRESERVE_CASE', False)
class_key, self.settings.get("SLUG_REGEX_SUBSTITUTIONS", [])
)
preserve_case = self.settings.get("SLUGIFY_PRESERVE_CASE", False)
self._slug = slugify(
self.name,
regex_subs=regex_subs,
preserve_case=preserve_case,
use_unicode=self.settings.get('SLUGIFY_USE_UNICODE', False)
use_unicode=self.settings.get("SLUGIFY_USE_UNICODE", False),
)
return self._slug
@@ -53,26 +52,26 @@ class URLWrapper:
def as_dict(self):
d = self.__dict__
d['name'] = self.name
d['slug'] = self.slug
d["name"] = self.name
d["slug"] = self.slug
return d
def __hash__(self):
return hash(self.slug)
def _normalize_key(self, key):
class_key = '{}_REGEX_SUBSTITUTIONS'.format(
self.__class__.__name__.upper())
class_key = "{}_REGEX_SUBSTITUTIONS".format(self.__class__.__name__.upper())
regex_subs = self.settings.get(
class_key,
self.settings.get('SLUG_REGEX_SUBSTITUTIONS', []))
use_unicode = self.settings.get('SLUGIFY_USE_UNICODE', False)
preserve_case = self.settings.get('SLUGIFY_PRESERVE_CASE', False)
class_key, self.settings.get("SLUG_REGEX_SUBSTITUTIONS", [])
)
use_unicode = self.settings.get("SLUGIFY_USE_UNICODE", False)
preserve_case = self.settings.get("SLUGIFY_PRESERVE_CASE", False)
return slugify(
key,
regex_subs=regex_subs,
preserve_case=preserve_case,
use_unicode=use_unicode)
use_unicode=use_unicode,
)
def __eq__(self, other):
if isinstance(other, self.__class__):
@@ -99,7 +98,7 @@ class URLWrapper:
return self.name
def __repr__(self):
return '<{} {}>'.format(type(self).__name__, repr(self._name))
return "<{} {}>".format(type(self).__name__, repr(self._name))
def _from_settings(self, key, get_page_name=False):
"""Returns URL information as defined in settings.
@@ -114,7 +113,7 @@ class URLWrapper:
if isinstance(value, pathlib.Path):
value = str(value)
if not isinstance(value, str):
logger.warning('%s is set to %s', setting, value)
logger.warning("%s is set to %s", setting, value)
return value
else:
if get_page_name:
@@ -122,10 +121,11 @@ class URLWrapper:
else:
return value.format(**self.as_dict())
page_name = property(functools.partial(_from_settings, key='URL',
get_page_name=True))
url = property(functools.partial(_from_settings, key='URL'))
save_as = property(functools.partial(_from_settings, key='SAVE_AS'))
page_name = property(
functools.partial(_from_settings, key="URL", get_page_name=True)
)
url = property(functools.partial(_from_settings, key="URL"))
save_as = property(functools.partial(_from_settings, key="SAVE_AS"))
class Category(URLWrapper):
@@ -32,38 +32,37 @@ logger = logging.getLogger(__name__)
def sanitised_join(base_directory, *parts):
joined = posixize_path(
os.path.abspath(os.path.join(base_directory, *parts)))
joined = posixize_path(os.path.abspath(os.path.join(base_directory, *parts)))
base = posixize_path(os.path.abspath(base_directory))
if not joined.startswith(base):
raise RuntimeError(
"Attempted to break out of output directory to {}".format(
joined
)
"Attempted to break out of output directory to {}".format(joined)
)
return joined
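Illustrative behavior of sanitised_join, assuming POSIX paths:

out = sanitised_join("/site/output", "posts", "a.html")  # "/site/output/posts/a.html"
# Path components that escape the base directory raise RuntimeError:
# sanitised_join("/site/output", "..", "etc", "passwd")  -> RuntimeError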
def strftime(date, date_format):
'''
"""
Enhanced replacement for built-in strftime with zero stripping
This works by 'grabbing' possible format strings (those starting with %),
formatting them with the date, stripping any leading zeros if - prefix is
used and replacing formatted output back.
'''
"""
def strip_zeros(x):
return x.lstrip('0') or '0'
return x.lstrip("0") or "0"
# includes ISO date parameters added by Python 3.6
c89_directives = 'aAbBcdfGHIjmMpSUuVwWxXyYzZ%'
c89_directives = "aAbBcdfGHIjmMpSUuVwWxXyYzZ%"
# grab candidate format options
format_options = '%[-]?.'
format_options = "%[-]?."
candidates = re.findall(format_options, date_format)
# replace candidates with placeholders for later % formatting
template = re.sub(format_options, '%s', date_format)
template = re.sub(format_options, "%s", date_format)
formatted_candidates = []
for candidate in candidates:
@@ -72,7 +71,7 @@ def strftime(date, date_format):
# check for '-' prefix
if len(candidate) == 3:
# '-' prefix
candidate = '%{}'.format(candidate[-1])
candidate = "%{}".format(candidate[-1])
conversion = strip_zeros
else:
conversion = None
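A sketch of the zero-stripping this enables, assuming an English locale:

import datetime
d = datetime.datetime(2023, 1, 5)
strftime(d, "%d %B %Y")   # "05 January 2023" (standard C89 behavior)
strftime(d, "%-d %B %Y")  # "5 January 2023"  (leading zero stripped)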
@@ -95,10 +94,10 @@ def strftime(date, date_format):
class SafeDatetime(datetime.datetime):
'''Subclass of datetime that works with utf-8 format strings on PY2'''
"""Subclass of datetime that works with utf-8 format strings on PY2"""
def strftime(self, fmt, safe=True):
'''Uses our custom strftime if supposed to be *safe*'''
"""Uses our custom strftime if supposed to be *safe*"""
if safe:
return strftime(self, fmt)
else:
@@ -106,22 +105,21 @@ class SafeDatetime(datetime.datetime):
class DateFormatter:
'''A date formatter object used as a jinja filter
"""A date formatter object used as a jinja filter
Uses the `strftime` implementation and makes sure jinja uses the locale
defined in LOCALE setting
'''
"""
def __init__(self):
self.locale = locale.setlocale(locale.LC_TIME)
def __call__(self, date, date_format):
# on OSX, encoding from LC_CTYPE determines the unicode output in PY3
# make sure it's same as LC_TIME
with temporary_locale(self.locale, locale.LC_TIME), \
temporary_locale(self.locale, locale.LC_CTYPE):
with temporary_locale(self.locale, locale.LC_TIME), temporary_locale(
self.locale, locale.LC_CTYPE
):
formatted = strftime(date, date_format)
return formatted
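A direct-call sketch (in themes this object is exposed as a Jinja date filter); the date and format string are illustrative:

fmt = DateFormatter()                         # captures LC_TIME at construction
fmt(SafeDatetime(2023, 1, 5), "%A %d %B %Y")  # formatted under that locale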
@@ -155,7 +153,7 @@ class memoized:
return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
"""Support instance methods."""
fn = partial(self.__call__, obj)
fn.cache = self.cache
return fn
@@ -177,17 +175,16 @@ def deprecated_attribute(old, new, since=None, remove=None, doc=None):
Note that the decorator needs a dummy method to attach to, but the
content of the dummy method is ignored.
"""
def _warn():
version = '.'.join(str(x) for x in since)
message = ['{} has been deprecated since {}'.format(old, version)]
version = ".".join(str(x) for x in since)
message = ["{} has been deprecated since {}".format(old, version)]
if remove:
version = '.'.join(str(x) for x in remove)
message.append(
' and will be removed by version {}'.format(version))
message.append('. Use {} instead.'.format(new))
logger.warning(''.join(message))
logger.debug(''.join(str(x) for x
in traceback.format_stack()))
version = ".".join(str(x) for x in remove)
message.append(" and will be removed by version {}".format(version))
message.append(". Use {} instead.".format(new))
logger.warning("".join(message))
logger.debug("".join(str(x) for x in traceback.format_stack()))
def fget(self):
_warn()
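The intended usage is roughly as follows (a sketch; the attribute names are illustrative):

    class Content:
        @deprecated_attribute(old="filename", new="source_path", since=(3, 2, 0))
        def filename():
            pass  # dummy method; its content is ignored, per the docstring above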
@@ -208,21 +205,20 @@ def get_date(string):
If no format matches the given date, raise a ValueError.
"""
string = re.sub(' +', ' ', string)
default = SafeDatetime.now().replace(hour=0, minute=0,
second=0, microsecond=0)
string = re.sub(" +", " ", string)
default = SafeDatetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
try:
return dateutil.parser.parse(string, default=default)
except (TypeError, ValueError):
raise ValueError('{!r} is not a valid date'.format(string))
raise ValueError("{!r} is not a valid date".format(string))
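For example (the parsing itself is delegated to dateutil):

    get_date("2012-03-02 14:01")  # -> SafeDatetime(2012, 3, 2, 14, 1)
    get_date("not a date")        # raises ValueError: 'not a date' is not a valid date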
@contextmanager
def pelican_open(filename, mode='r', strip_crs=(sys.platform == 'win32')):
def pelican_open(filename, mode="r", strip_crs=(sys.platform == "win32")):
"""Open a file and return its content"""
# utf-8-sig will clear any BOM if present
with open(filename, mode, encoding='utf-8-sig') as infile:
with open(filename, mode, encoding="utf-8-sig") as infile:
content = infile.read()
yield content
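Typical use (path hypothetical):

    with pelican_open("content/first-post.md") as text:
        ...  # text is the decoded file content, any BOM already stripped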
@@ -244,7 +240,7 @@ def slugify(value, regex_subs=(), preserve_case=False, use_unicode=False):
def normalize_unicode(text):
# normalize text by compatibility composition
# see: https://en.wikipedia.org/wiki/Unicode_equivalence
return unicodedata.normalize('NFKC', text)
return unicodedata.normalize("NFKC", text)
# strip tags from value
value = Markup(value).striptags()
@@ -259,10 +255,8 @@ def slugify(value, regex_subs=(), preserve_case=False, use_unicode=False):
# perform regex substitutions
for src, dst in regex_subs:
value = re.sub(
normalize_unicode(src),
normalize_unicode(dst),
value,
flags=re.IGNORECASE)
normalize_unicode(src), normalize_unicode(dst), value, flags=re.IGNORECASE
)
if not preserve_case:
value = value.lower()
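A sketch with illustrative substitutions; in practice Pelican supplies regex_subs from its settings:

    slugify("My First Post!", regex_subs=[(r"[^\w\s-]", ""), (r"\s+", "-")])
    # -> "my-first-post"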
@@ -283,8 +277,7 @@ def copy(source, destination, ignores=None):
"""
def walk_error(err):
logger.warning("While copying %s: %s: %s",
source_, err.filename, err.strerror)
logger.warning("While copying %s: %s: %s", source_, err.filename, err.strerror)
source_ = os.path.abspath(os.path.expanduser(source))
destination_ = os.path.abspath(os.path.expanduser(destination))
@@ -292,39 +285,40 @@ def copy(source, destination, ignores=None):
if ignores is None:
ignores = []
if any(fnmatch.fnmatch(os.path.basename(source), ignore)
for ignore in ignores):
logger.info('Not copying %s due to ignores', source_)
if any(fnmatch.fnmatch(os.path.basename(source), ignore) for ignore in ignores):
logger.info("Not copying %s due to ignores", source_)
return
if os.path.isfile(source_):
dst_dir = os.path.dirname(destination_)
if not os.path.exists(dst_dir):
logger.info('Creating directory %s', dst_dir)
logger.info("Creating directory %s", dst_dir)
os.makedirs(dst_dir)
logger.info('Copying %s to %s', source_, destination_)
logger.info("Copying %s to %s", source_, destination_)
copy_file(source_, destination_)
elif os.path.isdir(source_):
if not os.path.exists(destination_):
logger.info('Creating directory %s', destination_)
logger.info("Creating directory %s", destination_)
os.makedirs(destination_)
if not os.path.isdir(destination_):
logger.warning('Cannot copy %s (a directory) to %s (a file)',
source_, destination_)
logger.warning(
"Cannot copy %s (a directory) to %s (a file)", source_, destination_
)
return
for src_dir, subdirs, others in os.walk(source_, followlinks=True):
dst_dir = os.path.join(destination_,
os.path.relpath(src_dir, source_))
dst_dir = os.path.join(destination_, os.path.relpath(src_dir, source_))
subdirs[:] = (s for s in subdirs if not any(fnmatch.fnmatch(s, i)
for i in ignores))
others[:] = (o for o in others if not any(fnmatch.fnmatch(o, i)
for i in ignores))
subdirs[:] = (
s for s in subdirs if not any(fnmatch.fnmatch(s, i) for i in ignores)
)
others[:] = (
o for o in others if not any(fnmatch.fnmatch(o, i) for i in ignores)
)
if not os.path.isdir(dst_dir):
logger.info('Creating directory %s', dst_dir)
logger.info("Creating directory %s", dst_dir)
# Parent directories are known to exist, so 'mkdir' suffices.
os.mkdir(dst_dir)
@@ -332,21 +326,24 @@ def copy(source, destination, ignores=None):
src_path = os.path.join(src_dir, o)
dst_path = os.path.join(dst_dir, o)
if os.path.isfile(src_path):
logger.info('Copying %s to %s', src_path, dst_path)
logger.info("Copying %s to %s", src_path, dst_path)
copy_file(src_path, dst_path)
else:
logger.warning('Skipped copy %s (not a file or '
'directory) to %s',
src_path, dst_path)
logger.warning(
"Skipped copy %s (not a file or " "directory) to %s",
src_path,
dst_path,
)
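A typical invocation (paths and patterns hypothetical):

    copy("content/images", "output/images", ignores=["*.tmp", ".DS_Store"])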
def copy_file(source, destination):
'''Copy a file'''
"""Copy a file"""
try:
shutil.copyfile(source, destination)
except OSError as e:
logger.warning("A problem occurred copying file %s to %s; %s",
source, destination, e)
logger.warning(
"A problem occurred copying file %s to %s; %s", source, destination, e
)
def clean_output_dir(path, retention):
@@ -367,15 +364,15 @@ def clean_output_dir(path, retention):
for filename in os.listdir(path):
file = os.path.join(path, filename)
if any(filename == retain for retain in retention):
logger.debug("Skipping deletion; %s is on retention list: %s",
filename, file)
logger.debug(
"Skipping deletion; %s is on retention list: %s", filename, file
)
elif os.path.isdir(file):
try:
shutil.rmtree(file)
logger.debug("Deleted directory %s", file)
except Exception as e:
logger.error("Unable to delete directory %s; %s",
file, e)
logger.error("Unable to delete directory %s; %s", file, e)
elif os.path.isfile(file) or os.path.islink(file):
try:
os.remove(file)
@@ -407,29 +404,31 @@ def posixize_path(rel_path):
"""Use '/' as path separator, so that source references,
like '{static}/foo/bar.jpg' or 'extras/favicon.ico',
will work on Windows as well as on Mac and Linux."""
return rel_path.replace(os.sep, '/')
return rel_path.replace(os.sep, "/")
class _HTMLWordTruncator(HTMLParser):
_word_regex = re.compile(r"{DBC}|(\w[\w'-]*)".format(
# DBC means CJK-like characters. A character can stand for a word.
DBC=("([\u4E00-\u9FFF])|" # CJK Unified Ideographs
"([\u3400-\u4DBF])|" # CJK Unified Ideographs Extension A
"([\uF900-\uFAFF])|" # CJK Compatibility Ideographs
"([\U00020000-\U0002A6DF])|" # CJK Unified Ideographs Extension B
"([\U0002F800-\U0002FA1F])|" # CJK Compatibility Ideographs Supplement
"([\u3040-\u30FF])|" # Hiragana and Katakana
"([\u1100-\u11FF])|" # Hangul Jamo
"([\uAC00-\uD7FF])|" # Hangul Syllables
"([\u3130-\u318F])" # Hangul Compatibility Jamo
)), re.UNICODE)
_word_prefix_regex = re.compile(r'\w', re.U)
_singlets = ('br', 'col', 'link', 'base', 'img', 'param', 'area',
'hr', 'input')
_word_regex = re.compile(
r"{DBC}|(\w[\w'-]*)".format(
# DBC means CJK-like characters. A character can stand for a word.
DBC=(
"([\u4E00-\u9FFF])|" # CJK Unified Ideographs
"([\u3400-\u4DBF])|" # CJK Unified Ideographs Extension A
"([\uF900-\uFAFF])|" # CJK Compatibility Ideographs
"([\U00020000-\U0002A6DF])|" # CJK Unified Ideographs Extension B
"([\U0002F800-\U0002FA1F])|" # CJK Compatibility Ideographs Supplement
"([\u3040-\u30FF])|" # Hiragana and Katakana
"([\u1100-\u11FF])|" # Hangul Jamo
"([\uAC00-\uD7FF])|" # Hangul Syllables
"([\u3130-\u318F])" # Hangul Compatibility Jamo
)
),
re.UNICODE,
)
_word_prefix_regex = re.compile(r"\w", re.U)
_singlets = ("br", "col", "link", "base", "img", "param", "area", "hr", "input")
class TruncationCompleted(Exception):
def __init__(self, truncate_at):
super().__init__(truncate_at)
self.truncate_at = truncate_at
@@ -455,7 +454,7 @@ class _HTMLWordTruncator(HTMLParser):
line_start = 0
lineno, line_offset = self.getpos()
for i in range(lineno - 1):
line_start = self.rawdata.index('\n', line_start) + 1
line_start = self.rawdata.index("\n", line_start) + 1
return line_start + line_offset
def add_word(self, word_end):
@@ -482,7 +481,7 @@ class _HTMLWordTruncator(HTMLParser):
else:
# SGML: An end tag closes, back to the matching start tag,
# all unclosed intervening start tags with omitted end tags
del self.open_tags[:i + 1]
del self.open_tags[: i + 1]
def handle_data(self, data):
word_end = 0
@@ -531,7 +530,7 @@ class _HTMLWordTruncator(HTMLParser):
ref_end = offset + len(name) + 1
try:
if self.rawdata[ref_end] == ';':
if self.rawdata[ref_end] == ";":
ref_end += 1
except IndexError:
# We are at the end of the string and there's no ';'
@@ -556,7 +555,7 @@ class _HTMLWordTruncator(HTMLParser):
codepoint = entities.name2codepoint[name]
char = chr(codepoint)
except KeyError:
char = ''
char = ""
self._handle_ref(name, char)
def handle_charref(self, name):
@@ -567,17 +566,17 @@ class _HTMLWordTruncator(HTMLParser):
`#x2014`)
"""
try:
if name.startswith('x'):
if name.startswith("x"):
codepoint = int(name[1:], 16)
else:
codepoint = int(name)
char = chr(codepoint)
except (ValueError, OverflowError):
char = ''
self._handle_ref('#' + name, char)
char = ""
self._handle_ref("#" + name, char)
def truncate_html_words(s, num, end_text='…'):
def truncate_html_words(s, num, end_text="…"):
"""Truncates HTML to a certain number of words.
(not counting tags and comments). Closes opened tags if they were correctly
@@ -588,23 +587,23 @@ def truncate_html_words(s, num, end_text='…'):
"""
length = int(num)
if length <= 0:
return ''
return ""
truncator = _HTMLWordTruncator(length)
truncator.feed(s)
if truncator.truncate_at is None:
return s
out = s[:truncator.truncate_at]
out = s[: truncator.truncate_at]
if end_text:
out += ' ' + end_text
out += " " + end_text
# Close any tags still open
for tag in truncator.open_tags:
out += '</%s>' % tag
out += "</%s>" % tag
# Return string
return out
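For instance:

    truncate_html_words("<p>one two three four</p>", 2)
    # -> '<p>one two …</p>'; the still-open <p> is re-closed after the cut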
def process_translations(content_list, translation_id=None):
""" Finds translations and returns them.
"""Finds translations and returns them.
For each content_list item, populates the 'translations' attribute, and
returns a tuple with two lists (index, translations). Index list includes
@@ -632,19 +631,23 @@
try:
content_list.sort(key=attrgetter(*translation_id))
except TypeError:
raise TypeError('Cannot unpack {}, \'translation_id\' must be falsy, a'
' string or a collection of strings'
.format(translation_id))
raise TypeError(
"Cannot unpack {}, 'translation_id' must be falsy, a"
" string or a collection of strings".format(translation_id)
)
except AttributeError:
raise AttributeError('Cannot use {} as \'translation_id\', there '
'appear to be items without these metadata '
'attributes'.format(translation_id))
raise AttributeError(
"Cannot use {} as 'translation_id', there "
"appear to be items without these metadata "
"attributes".format(translation_id)
)
for id_vals, items in groupby(content_list, attrgetter(*translation_id)):
# prepare warning string
id_vals = (id_vals,) if len(translation_id) == 1 else id_vals
with_str = 'with' + ', '.join([' {} "{{}}"'] * len(translation_id))\
.format(*translation_id).format(*id_vals)
with_str = "with" + ", ".join([' {} "{{}}"'] * len(translation_id)).format(
*translation_id
).format(*id_vals)
items = list(items)
original_items = get_original_items(items, with_str)
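A sketch of a typical call, grouping items that share a slug:

    index, translations = process_translations(articles, translation_id="slug")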
@@ -662,24 +665,24 @@ def get_original_items(items, with_str):
args = [len(items)]
args.extend(extra)
args.extend(x.source_path for x in items)
logger.warning('{}: {}'.format(msg, '\n%s' * len(items)), *args)
logger.warning("{}: {}".format(msg, "\n%s" * len(items)), *args)
# warn if several items have the same lang
for lang, lang_items in groupby(items, attrgetter('lang')):
for lang, lang_items in groupby(items, attrgetter("lang")):
lang_items = list(lang_items)
if len(lang_items) > 1:
_warn_source_paths('There are %s items "%s" with lang %s',
lang_items, with_str, lang)
_warn_source_paths(
'There are %s items "%s" with lang %s', lang_items, with_str, lang
)
# items with `translation` metadata will be used as translations...
candidate_items = [
i for i in items
if i.metadata.get('translation', 'false').lower() == 'false']
i for i in items if i.metadata.get("translation", "false").lower() == "false"
]
# ...unless all items with that slug are translations
if not candidate_items:
_warn_source_paths('All items ("%s") "%s" are translations',
items, with_str)
_warn_source_paths('All items ("%s") "%s" are translations', items, with_str)
candidate_items = items
# find items with default language
@@ -691,13 +694,14 @@ def get_original_items(items, with_str):
# warn if there are several original items
if len(original_items) > 1:
_warn_source_paths('There are %s original (not translated) items %s',
original_items, with_str)
_warn_source_paths(
"There are %s original (not translated) items %s", original_items, with_str
)
return original_items
def order_content(content_list, order_by='slug'):
""" Sorts content.
def order_content(content_list, order_by="slug"):
"""Sorts content.
order_by can be a string of an attribute or sorting function. If order_by
is defined, content will be ordered by that attribute or sorting function.
@@ -713,22 +717,22 @@ def order_content(content_list, order_by='slug'):
try:
content_list.sort(key=order_by)
except Exception:
logger.error('Error sorting with function %s', order_by)
logger.error("Error sorting with function %s", order_by)
elif isinstance(order_by, str):
if order_by.startswith('reversed-'):
if order_by.startswith("reversed-"):
order_reversed = True
order_by = order_by.replace('reversed-', '', 1)
order_by = order_by.replace("reversed-", "", 1)
else:
order_reversed = False
if order_by == 'basename':
if order_by == "basename":
content_list.sort(
key=lambda x: os.path.basename(x.source_path or ''),
reverse=order_reversed)
key=lambda x: os.path.basename(x.source_path or ""),
reverse=order_reversed,
)
else:
try:
content_list.sort(key=attrgetter(order_by),
reverse=order_reversed)
content_list.sort(key=attrgetter(order_by), reverse=order_reversed)
except AttributeError:
for content in content_list:
try:
@@ -736,26 +740,31 @@ def order_content(content_list, order_by='slug'):
except AttributeError:
logger.warning(
'There is no "%s" attribute in "%s". '
'Defaulting to slug order.',
"Defaulting to slug order.",
order_by,
content.get_relative_source_path(),
extra={
'limit_msg': ('More files are missing '
'the needed attribute.')
})
"limit_msg": (
"More files are missing "
"the needed attribute."
)
},
)
else:
logger.warning(
'Invalid *_ORDER_BY setting (%s). '
'Valid options are strings and functions.', order_by)
"Invalid *_ORDER_BY setting (%s). "
"Valid options are strings and functions.",
order_by,
)
return content_list
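For example:

    pages = order_content(pages)                                  # default: by slug
    articles = order_content(articles, order_by="reversed-date")  # newest first
    drafts = order_content(drafts, order_by="basename")           # by source file name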
def wait_for_changes(settings_file, reader_class, settings):
content_path = settings.get('PATH', '')
theme_path = settings.get('THEME', '')
content_path = settings.get("PATH", "")
theme_path = settings.get("THEME", "")
ignore_files = set(
fnmatch.translate(pattern) for pattern in settings.get('IGNORE_FILES', [])
fnmatch.translate(pattern) for pattern in settings.get("IGNORE_FILES", [])
)
candidate_paths = [
@@ -765,7 +774,7 @@ def wait_for_changes(settings_file, reader_class, settings):
]
candidate_paths.extend(
os.path.join(content_path, path) for path in settings.get('STATIC_PATHS', [])
os.path.join(content_path, path) for path in settings.get("STATIC_PATHS", [])
)
watching_paths = []
@@ -778,11 +787,13 @@ def wait_for_changes(settings_file, reader_class, settings):
else:
watching_paths.append(path)
return next(watchfiles.watch(
*watching_paths,
watch_filter=watchfiles.DefaultFilter(ignore_entity_patterns=ignore_files),
rust_timeout=0
))
return next(
watchfiles.watch(
*watching_paths,
watch_filter=watchfiles.DefaultFilter(ignore_entity_patterns=ignore_files),
rust_timeout=0,
)
)
def set_date_tzinfo(d, tz_name=None):
@@ -811,7 +822,7 @@ def split_all(path):
"""
if isinstance(path, str):
components = []
path = path.lstrip('/')
path = path.lstrip("/")
while path:
head, tail = os.path.split(path)
if tail:
@@ -827,32 +838,30 @@ def split_all(path):
return None
else:
raise TypeError(
'"path" was {}, must be string, None, or pathlib.Path'.format(
type(path)
)
'"path" was {}, must be string, None, or pathlib.Path'.format(type(path))
)
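A sketch of the intended behaviour (the loop body that collects the components is elided in this hunk):

    split_all("posts/2012/article.html")  # -> the components "posts", "2012", "article.html"
    split_all(None)                       # -> None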
def is_selected_for_writing(settings, path):
'''Check whether path is selected for writing
"""Check whether path is selected for writing
according to the WRITE_SELECTED list
If WRITE_SELECTED is an empty list (default),
any path is selected for writing.
'''
if settings['WRITE_SELECTED']:
return path in settings['WRITE_SELECTED']
"""
if settings["WRITE_SELECTED"]:
return path in settings["WRITE_SELECTED"]
else:
return True
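So, with a hypothetical setting such as:

    WRITE_SELECTED = ["/path/to/output/posts/my-post/index.html"]

only that output path gets written; with the default empty list, every path is selected.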
def path_to_file_url(path):
'''Convert file-system path to file:// URL'''
"""Convert file-system path to file:// URL"""
return urllib.parse.urljoin("file://", urllib.request.pathname2url(path))
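For example, on a POSIX system:

    path_to_file_url("/home/user/site/output")  # -> "file:///home/user/site/output"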
def maybe_pluralize(count, singular, plural):
'''
"""
Returns a formatted string containing count and plural if count is not 1
Returns count and singular if count is 1
@@ -860,22 +869,22 @@ def maybe_pluralize(count, singular, plural):
maybe_pluralize(1, 'Article', 'Articles') -> '1 Article'
maybe_pluralize(2, 'Article', 'Articles') -> '2 Articles'
'''
"""
selection = plural
if count == 1:
selection = singular
return '{} {}'.format(count, selection)
return "{} {}".format(count, selection)
@contextmanager
def temporary_locale(temp_locale=None, lc_category=locale.LC_ALL):
'''
"""
Enable code to run in a context with a temporary locale
Resets the locale back when exiting context.
Use tests.support.TestCaseWithCLocale if you want every unit test in a
class to use the C locale.
'''
"""
orig_locale = locale.setlocale(lc_category)
if temp_locale:
locale.setlocale(lc_category, temp_locale)
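Typical use (locale name illustrative):

    with temporary_locale("C", locale.LC_TIME):
        ...  # code here sees the C time locale; the previous one is restored on exit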

View file

@@ -9,14 +9,18 @@ from markupsafe import Markup
from pelican.paginator import Paginator
from pelican.plugins import signals
from pelican.utils import (get_relative_path, is_selected_for_writing,
path_to_url, sanitised_join, set_date_tzinfo)
from pelican.utils import (
get_relative_path,
is_selected_for_writing,
path_to_url,
sanitised_join,
set_date_tzinfo,
)
logger = logging.getLogger(__name__)
class Writer:
def __init__(self, output_path, settings=None):
self.output_path = output_path
self.reminder = dict()
@@ -25,24 +29,26 @@ class Writer:
self._overridden_files = set()
# See Content._link_replacer for details
if "RELATIVE_URLS" in self.settings and self.settings['RELATIVE_URLS']:
if "RELATIVE_URLS" in self.settings and self.settings["RELATIVE_URLS"]:
self.urljoiner = posix_join
else:
self.urljoiner = lambda base, url: urljoin(
base if base.endswith('/') else base + '/', str(url))
base if base.endswith("/") else base + "/", str(url)
)
def _create_new_feed(self, feed_type, feed_title, context):
feed_class = Rss201rev2Feed if feed_type == 'rss' else Atom1Feed
feed_class = Rss201rev2Feed if feed_type == "rss" else Atom1Feed
if feed_title:
feed_title = context['SITENAME'] + ' - ' + feed_title
feed_title = context["SITENAME"] + " - " + feed_title
else:
feed_title = context['SITENAME']
feed_title = context["SITENAME"]
return feed_class(
title=Markup(feed_title).striptags(),
link=(self.site_url + '/'),
link=(self.site_url + "/"),
feed_url=self.feed_url,
description=context.get('SITESUBTITLE', ''),
subtitle=context.get('SITESUBTITLE', None))
description=context.get("SITESUBTITLE", ""),
subtitle=context.get("SITESUBTITLE", None),
)
def _add_item_to_the_feed(self, feed, item):
title = Markup(item.title).striptags()
@@ -52,7 +58,7 @@ class Writer:
# RSS feeds use a single tag called 'description' for both the full
# content and the summary
content = None
if self.settings.get('RSS_FEED_SUMMARY_ONLY'):
if self.settings.get("RSS_FEED_SUMMARY_ONLY"):
description = item.summary
else:
description = item.get_content(self.site_url)
@@ -71,9 +77,9 @@ class Writer:
description = None
categories = []
if hasattr(item, 'category'):
if hasattr(item, "category"):
categories.append(item.category)
if hasattr(item, 'tags'):
if hasattr(item, "tags"):
categories.extend(item.tags)
feed.add_item(
@@ -83,14 +89,12 @@ class Writer:
description=description,
content=content,
categories=categories or None,
author_name=getattr(item, 'author', ''),
pubdate=set_date_tzinfo(
item.date, self.settings.get('TIMEZONE', None)
),
author_name=getattr(item, "author", ""),
pubdate=set_date_tzinfo(item.date, self.settings.get("TIMEZONE", None)),
updateddate=set_date_tzinfo(
item.modified, self.settings.get('TIMEZONE', None)
item.modified, self.settings.get("TIMEZONE", None)
)
if hasattr(item, 'modified')
if hasattr(item, "modified")
else None,
)
@@ -102,22 +106,29 @@ class Writer:
"""
if filename in self._overridden_files:
if override:
raise RuntimeError('File %s is set to be overridden twice'
% filename)
logger.info('Skipping %s', filename)
raise RuntimeError("File %s is set to be overridden twice" % filename)
logger.info("Skipping %s", filename)
filename = os.devnull
elif filename in self._written_files:
if override:
logger.info('Overwriting %s', filename)
logger.info("Overwriting %s", filename)
else:
raise RuntimeError('File %s is to be overwritten' % filename)
raise RuntimeError("File %s is to be overwritten" % filename)
if override:
self._overridden_files.add(filename)
self._written_files.add(filename)
return open(filename, 'w', encoding=encoding)
return open(filename, "w", encoding=encoding)
def write_feed(self, elements, context, path=None, url=None,
feed_type='atom', override_output=False, feed_title=None):
def write_feed(
self,
elements,
context,
path=None,
url=None,
feed_type="atom",
override_output=False,
feed_title=None,
):
"""Generate a feed with the list of articles provided
Return the feed. If no path or output_path is specified, just
@@ -137,16 +148,15 @@ class Writer:
if not is_selected_for_writing(self.settings, path):
return
self.site_url = context.get(
'SITEURL', path_to_url(get_relative_path(path)))
self.site_url = context.get("SITEURL", path_to_url(get_relative_path(path)))
self.feed_domain = context.get('FEED_DOMAIN')
self.feed_domain = context.get("FEED_DOMAIN")
self.feed_url = self.urljoiner(self.feed_domain, url or path)
feed = self._create_new_feed(feed_type, feed_title, context)
# FEED_MAX_ITEMS = None means [:None] to get every element
for element in elements[:self.settings['FEED_MAX_ITEMS']]:
for element in elements[: self.settings["FEED_MAX_ITEMS"]]:
self._add_item_to_the_feed(feed, element)
signals.feed_generated.send(context, feed=feed)
@@ -158,17 +168,25 @@ class Writer:
except Exception:
pass
with self._open_w(complete_path, 'utf-8', override_output) as fp:
feed.write(fp, 'utf-8')
logger.info('Writing %s', complete_path)
with self._open_w(complete_path, "utf-8", override_output) as fp:
feed.write(fp, "utf-8")
logger.info("Writing %s", complete_path)
signals.feed_written.send(
complete_path, context=context, feed=feed)
signals.feed_written.send(complete_path, context=context, feed=feed)
return feed
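A sketch of a typical call, assuming settings and context are an already-built settings dict and template context:

    from pelican.writers import Writer

    writer = Writer("output", settings=settings)
    writer.write_feed(articles, context, path="feeds/all.atom.xml", feed_type="atom")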
def write_file(self, name, template, context, relative_urls=False,
paginated=None, template_name=None, override_output=False,
url=None, **kwargs):
def write_file(
self,
name,
template,
context,
relative_urls=False,
paginated=None,
template_name=None,
override_output=False,
url=None,
**kwargs,
):
"""Render the template and write the file.
:param name: name of the file to output
@@ -185,10 +203,13 @@ class Writer:
:param **kwargs: additional variables to pass to the templates
"""
if name is False or \
name == "" or \
not is_selected_for_writing(self.settings,
os.path.join(self.output_path, name)):
if (
name is False
or name == ""
or not is_selected_for_writing(
self.settings, os.path.join(self.output_path, name)
)
):
return
elif not name:
# other stuff, just return for now
@@ -197,8 +218,8 @@ class Writer:
def _write_file(template, localcontext, output_path, name, override):
"""Render the template write the file."""
# set localsiteurl for context so that Contents can adjust links
if localcontext['localsiteurl']:
context['localsiteurl'] = localcontext['localsiteurl']
if localcontext["localsiteurl"]:
context["localsiteurl"] = localcontext["localsiteurl"]
output = template.render(localcontext)
path = sanitised_join(output_path, name)
@@ -207,9 +228,9 @@ class Writer:
except Exception:
pass
with self._open_w(path, 'utf-8', override=override) as f:
with self._open_w(path, "utf-8", override=override) as f:
f.write(output)
logger.info('Writing %s', path)
logger.info("Writing %s", path)
# Send a signal to say we're writing a file with some specific
# local context.
@@ -217,54 +238,66 @@ class Writer:
def _get_localcontext(context, name, kwargs, relative_urls):
localcontext = context.copy()
localcontext['localsiteurl'] = localcontext.get(
'localsiteurl', None)
localcontext["localsiteurl"] = localcontext.get("localsiteurl", None)
if relative_urls:
relative_url = path_to_url(get_relative_path(name))
localcontext['SITEURL'] = relative_url
localcontext['localsiteurl'] = relative_url
localcontext['output_file'] = name
localcontext["SITEURL"] = relative_url
localcontext["localsiteurl"] = relative_url
localcontext["output_file"] = name
localcontext.update(kwargs)
return localcontext
if paginated is None:
paginated = {key: val for key, val in kwargs.items()
if key in {'articles', 'dates'}}
paginated = {
key: val for key, val in kwargs.items() if key in {"articles", "dates"}
}
# pagination
if paginated and template_name in self.settings['PAGINATED_TEMPLATES']:
if paginated and template_name in self.settings["PAGINATED_TEMPLATES"]:
# pagination needed
per_page = self.settings['PAGINATED_TEMPLATES'][template_name] \
or self.settings['DEFAULT_PAGINATION']
per_page = (
self.settings["PAGINATED_TEMPLATES"][template_name]
or self.settings["DEFAULT_PAGINATION"]
)
# init paginators
paginators = {key: Paginator(name, url, val, self.settings,
per_page)
for key, val in paginated.items()}
paginators = {
key: Paginator(name, url, val, self.settings, per_page)
for key, val in paginated.items()
}
# generated pages, and write
for page_num in range(list(paginators.values())[0].num_pages):
paginated_kwargs = kwargs.copy()
for key in paginators.keys():
paginator = paginators[key]
previous_page = paginator.page(page_num) \
if page_num > 0 else None
previous_page = paginator.page(page_num) if page_num > 0 else None
page = paginator.page(page_num + 1)
next_page = paginator.page(page_num + 2) \
if page_num + 1 < paginator.num_pages else None
next_page = (
paginator.page(page_num + 2)
if page_num + 1 < paginator.num_pages
else None
)
paginated_kwargs.update(
{'%s_paginator' % key: paginator,
'%s_page' % key: page,
'%s_previous_page' % key: previous_page,
'%s_next_page' % key: next_page})
{
"%s_paginator" % key: paginator,
"%s_page" % key: page,
"%s_previous_page" % key: previous_page,
"%s_next_page" % key: next_page,
}
)
localcontext = _get_localcontext(
context, page.save_as, paginated_kwargs, relative_urls)
_write_file(template, localcontext, self.output_path,
page.save_as, override_output)
context, page.save_as, paginated_kwargs, relative_urls
)
_write_file(
template,
localcontext,
self.output_path,
page.save_as,
override_output,
)
else:
# no pagination
localcontext = _get_localcontext(
context, name, kwargs, relative_urls)
_write_file(template, localcontext, self.output_path, name,
override_output)
localcontext = _get_localcontext(context, name, kwargs, relative_urls)
_write_file(template, localcontext, self.output_path, name, override_output)

View file

@@ -29,15 +29,15 @@ classifiers = [
]
requires-python = ">=3.8.1,<4.0"
dependencies = [
"blinker>=1.4",
"docutils>=0.16",
"feedgenerator>=1.9",
"jinja2>=2.7",
"pygments>=2.6",
"python-dateutil>=2.8",
"rich>=10.1",
"unidecode>=1.1",
"backports-zoneinfo<1.0.0,>=0.2.1;python_version<'3.9'",
"blinker>=1.6.3",
"docutils>=0.20.1",
"feedgenerator>=2.1.0",
"jinja2>=3.1.2",
"pygments>=2.16.1",
"python-dateutil>=2.8.2",
"rich>=13.6.0",
"unidecode>=1.3.7",
"backports-zoneinfo>=0.2.1; python_version < \"3.9\"",
"watchfiles>=0.21.0",
]
@@ -76,28 +76,24 @@ test = "invoke tests"
[tool.pdm.dev-dependencies]
dev = [
"BeautifulSoup4<5.0,>=4.9",
"jinja2~=3.1.2",
"lxml<5.0,>=4.3",
"markdown~=3.4.3",
"typogrify<3.0,>=2.0",
"sphinx<6.0,>=5.1",
"furo==2023.03.27",
"livereload<3.0,>=2.6",
"psutil<6.0,>=5.7",
"pygments~=2.15",
"pytest<8.0,>=7.1",
"pytest-cov<5.0,>=4.0",
"pytest-sugar<1.0.0,>=0.9.5",
"pytest-xdist<3.0,>=2.0",
"tox<4.0,>=3.13",
"flake8<4.0,>=3.8",
"flake8-import-order<1.0.0,>=0.18.1",
"invoke<3.0,>=2.0",
"isort<6.0,>=5.2",
"black<20.0,>=19.10b0",
"ruff>=0.1.3,<1.0.0",
"tomli;python_version<'3.11'",
"BeautifulSoup4>=4.12.2",
"jinja2>=3.1.2",
"lxml>=4.9.3",
"markdown>=3.5",
"typogrify>=2.0.7",
"sphinx>=7.1.2",
"furo>=2023.9.10",
"livereload>=2.6.3",
"psutil>=5.9.6",
"pygments>=2.16.1",
"pytest>=7.4.3",
"pytest-cov>=4.1.0",
"pytest-sugar>=0.9.7",
"pytest-xdist>=3.3.1",
"tox>=4.11.3",
"invoke>=2.2.0",
"ruff>=0.1.3",
"tomli>=2.0.1; python_version < \"3.11\"",
]
[tool.pdm.build]

View file

@@ -1,2 +0,0 @@
flake8==3.9.2
flake8-import-order

View file

@@ -6,4 +6,3 @@ This is a test hidden page
This is great for things like error(404) pages
Anyone can see this page but it's not linked to anywhere!

View file

@@ -45,7 +45,7 @@ Testing more sourcecode directives
:lineseparator: <br>
:linespans: foo
:nobackground:
def run(self):
self.assert_has_content()
try:
@@ -76,8 +76,8 @@ Testing even more sourcecode directives
.. sourcecode:: python
:linenos: table
:nowrap:
formatter = self.options and VARIANTS[self.options.keys()[0]]
@@ -90,8 +90,8 @@ Even if the default is line numbers, we can override it here
.. sourcecode:: python
:linenos: none
formatter = self.options and VARIANTS[self.options.keys()[0]]

View file

@@ -1,55 +1,59 @@
AUTHOR = 'Alexis Métaireau'
AUTHOR = "Alexis Métaireau"
SITENAME = "Alexis' log"
SITESUBTITLE = 'A personal blog.'
SITEURL = 'http://blog.notmyidea.org'
SITESUBTITLE = "A personal blog."
SITEURL = "http://blog.notmyidea.org"
TIMEZONE = "Europe/Paris"
# can be useful in development, but set to False when you're ready to publish
RELATIVE_URLS = True
GITHUB_URL = 'http://github.com/ametaireau/'
GITHUB_URL = "http://github.com/ametaireau/"
DISQUS_SITENAME = "blog-notmyidea"
REVERSE_CATEGORY_ORDER = True
LOCALE = "C"
DEFAULT_PAGINATION = 4
DEFAULT_DATE = (2012, 3, 2, 14, 1, 1)
FEED_ALL_RSS = 'feeds/all.rss.xml'
CATEGORY_FEED_RSS = 'feeds/{slug}.rss.xml'
FEED_ALL_RSS = "feeds/all.rss.xml"
CATEGORY_FEED_RSS = "feeds/{slug}.rss.xml"
LINKS = (('Biologeek', 'http://biologeek.org'),
('Filyb', "http://filyb.info/"),
('Libert-fr', "http://www.libert-fr.com"),
('N1k0', "http://prendreuncafe.com/blog/"),
('Tarek Ziadé', "http://ziade.org/blog"),
('Zubin Mithra', "http://zubin71.wordpress.com/"),)
LINKS = (
("Biologeek", "http://biologeek.org"),
("Filyb", "http://filyb.info/"),
("Libert-fr", "http://www.libert-fr.com"),
("N1k0", "http://prendreuncafe.com/blog/"),
("Tarek Ziadé", "http://ziade.org/blog"),
("Zubin Mithra", "http://zubin71.wordpress.com/"),
)
SOCIAL = (('twitter', 'http://twitter.com/ametaireau'),
('lastfm', 'http://lastfm.com/user/akounet'),
('github', 'http://github.com/ametaireau'),)
SOCIAL = (
("twitter", "http://twitter.com/ametaireau"),
("lastfm", "http://lastfm.com/user/akounet"),
("github", "http://github.com/ametaireau"),
)
# global metadata to all the contents
DEFAULT_METADATA = {'yeah': 'it is'}
DEFAULT_METADATA = {"yeah": "it is"}
# path-specific metadata
EXTRA_PATH_METADATA = {
'extra/robots.txt': {'path': 'robots.txt'},
}
"extra/robots.txt": {"path": "robots.txt"},
}
# static paths will be copied without parsing their contents
STATIC_PATHS = [
'images',
'extra/robots.txt',
]
"images",
"extra/robots.txt",
]
# custom page generated with a jinja2 template
TEMPLATE_PAGES = {'pages/jinja2_template.html': 'jinja2_template.html'}
TEMPLATE_PAGES = {"pages/jinja2_template.html": "jinja2_template.html"}
# there is no other HTML content
READERS = {'html': None}
READERS = {"html": None}
# code blocks with line numbers
PYGMENTS_RST_OPTIONS = {'linenos': 'table'}
PYGMENTS_RST_OPTIONS = {"linenos": "table"}
# foobar will not be used, because it's not in caps. All configuration keys
# have to be in caps

View file

@@ -1,56 +1,60 @@
AUTHOR = 'Alexis Métaireau'
AUTHOR = "Alexis Métaireau"
SITENAME = "Alexis' log"
SITEURL = 'http://blog.notmyidea.org'
SITEURL = "http://blog.notmyidea.org"
TIMEZONE = "Europe/Paris"
# can be useful in development, but set to False when you're ready to publish
RELATIVE_URLS = True
GITHUB_URL = 'http://github.com/ametaireau/'
GITHUB_URL = "http://github.com/ametaireau/"
DISQUS_SITENAME = "blog-notmyidea"
PDF_GENERATOR = False
REVERSE_CATEGORY_ORDER = True
LOCALE = "fr_FR.UTF-8"
DEFAULT_PAGINATION = 4
DEFAULT_DATE = (2012, 3, 2, 14, 1, 1)
DEFAULT_DATE_FORMAT = '%d %B %Y'
DEFAULT_DATE_FORMAT = "%d %B %Y"
ARTICLE_URL = 'posts/{date:%Y}/{date:%B}/{date:%d}/{slug}/'
ARTICLE_SAVE_AS = ARTICLE_URL + 'index.html'
ARTICLE_URL = "posts/{date:%Y}/{date:%B}/{date:%d}/{slug}/"
ARTICLE_SAVE_AS = ARTICLE_URL + "index.html"
FEED_ALL_RSS = 'feeds/all.rss.xml'
CATEGORY_FEED_RSS = 'feeds/{slug}.rss.xml'
FEED_ALL_RSS = "feeds/all.rss.xml"
CATEGORY_FEED_RSS = "feeds/{slug}.rss.xml"
LINKS = (('Biologeek', 'http://biologeek.org'),
('Filyb', "http://filyb.info/"),
('Libert-fr', "http://www.libert-fr.com"),
('N1k0', "http://prendreuncafe.com/blog/"),
('Tarek Ziadé', "http://ziade.org/blog"),
('Zubin Mithra', "http://zubin71.wordpress.com/"),)
LINKS = (
("Biologeek", "http://biologeek.org"),
("Filyb", "http://filyb.info/"),
("Libert-fr", "http://www.libert-fr.com"),
("N1k0", "http://prendreuncafe.com/blog/"),
("Tarek Ziadé", "http://ziade.org/blog"),
("Zubin Mithra", "http://zubin71.wordpress.com/"),
)
SOCIAL = (('twitter', 'http://twitter.com/ametaireau'),
('lastfm', 'http://lastfm.com/user/akounet'),
('github', 'http://github.com/ametaireau'),)
SOCIAL = (
("twitter", "http://twitter.com/ametaireau"),
("lastfm", "http://lastfm.com/user/akounet"),
("github", "http://github.com/ametaireau"),
)
# global metadata to all the contents
DEFAULT_METADATA = {'yeah': 'it is'}
DEFAULT_METADATA = {"yeah": "it is"}
# path-specific metadata
EXTRA_PATH_METADATA = {
'extra/robots.txt': {'path': 'robots.txt'},
}
"extra/robots.txt": {"path": "robots.txt"},
}
# static paths will be copied without parsing their contents
STATIC_PATHS = [
'pictures',
'extra/robots.txt',
]
"pictures",
"extra/robots.txt",
]
# custom page generated with a jinja2 template
TEMPLATE_PAGES = {'pages/jinja2_template.html': 'jinja2_template.html'}
TEMPLATE_PAGES = {"pages/jinja2_template.html": "jinja2_template.html"}
# code blocks with line numbers
PYGMENTS_RST_OPTIONS = {'linenos': 'table'}
PYGMENTS_RST_OPTIONS = {"linenos": "table"}
# foobar will not be used, because it's not in caps. All configuration keys
# have to be in caps

View file

@@ -52,24 +52,16 @@ def coverage(c):
@task
def black(c, check=False, diff=False):
"""Run Black auto-formatter, optionally with --check or --diff"""
def format(c, check=False, diff=False):
"""Run Ruff's auto-formatter, optionally with --check or --diff"""
check_flag, diff_flag = "", ""
if check:
check_flag = "--check"
if diff:
diff_flag = "--diff"
c.run(f"{VENV_BIN}/black {check_flag} {diff_flag} {PKG_PATH} tasks.py", pty=PTY)
@task
def isort(c, check=False, diff=False):
check_flag, diff_flag = "", ""
if check:
check_flag = "-c"
if diff:
diff_flag = "--diff"
c.run(f"{VENV_BIN}/isort {check_flag} {diff_flag} .", pty=PTY)
c.run(
f"{VENV_BIN}/ruff format {check_flag} {diff_flag} {PKG_PATH} tasks.py", pty=PTY
)
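From a shell this maps to, e.g., "invoke format" to rewrite files in place, or "invoke format --check --diff" to report what would change without writing anything.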
@task
@@ -87,6 +79,7 @@ def ruff(c, fix=False, diff=False):
def lint(c, fix=False, diff=False):
"""Check code style via linting tools."""
ruff(c, fix=fix, diff=diff)
format(c, check=not fix, diff=diff)
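With this addition, "invoke lint" verifies both lint rules and formatting, while "invoke lint --fix" lets Ruff apply its fixes and reformat the files.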
@task

View file

@@ -1,5 +1,5 @@
[tox]
envlist = py{3.8,3.9,3.10,3.11,3.12},docs,flake8
envlist = py{3.8,3.9,3.10,3.11,3.12},docs
[testenv]
basepython =