Mirror of https://github.com/getpelican/pelican.git (synced 2025-10-15 20:28:56 +02:00)

Merge pull request #3314 from boxydog/more_ruff_fixes2
commit 59756f8faf

18 changed files with 81 additions and 86 deletions
@@ -11,3 +11,5 @@ db241feaa445375dc05e189e69287000ffe5fa8e
 0bd02c00c078fe041b65fbf4eab13601bb42676d
 # Apply more Ruff checks to code
 9d30c5608a58d202b1c02d55651e6ac746bfb173
+# Apply yet more Ruff checks to code
+7577dd7603f7cb3a09922d1edb65b6eafb6e2ac7
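The two new entries record the hash of each bulk Ruff-cleanup commit alongside a comment naming it. Assuming this file is the conventional `.git-blame-ignore-revs`, Git can be told to skip these commits in blame output with `git config blame.ignoreRevsFile .git-blame-ignore-revs`, so mechanical reformatting does not obscure authorship.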
@@ -417,7 +417,7 @@ def parse_arguments(argv=None):
         "--relative-urls",
         dest="relative_paths",
         action="store_true",
-        help="Use relative urls in output, " "useful for site development",
+        help="Use relative urls in output, useful for site development",
     )

     parser.add_argument(
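Most hunks in this commit collapse adjacent string literals like the `help=` line above. Python's parser merges adjacent literals into a single string at compile time, which is what Ruff's ISC001 (single-line-implicit-string-concatenation) flags: the construct is legal but error-prone, because a forgotten comma silently fuses two values. A minimal sketch of both the behavior and the hazard:

    # Adjacent literals merge at parse time: both spellings build the same string.
    msg = "Use relative urls in output, " "useful for site development"
    assert msg == "Use relative urls in output, useful for site development"

    # The failure mode the rule guards against: a forgotten comma in a list.
    rules = [
        "ISC001",
        "C408"      # <- missing comma
        "B006",
    ]
    assert rules == ["ISC001", "C408B006"]  # two entries silently became one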
@@ -433,7 +433,7 @@ def parse_arguments(argv=None):
         "--ignore-cache",
         action="store_true",
         dest="ignore_cache",
-        help="Ignore content cache " "from previous runs by not loading cache files.",
+        help="Ignore content cache from previous runs by not loading cache files.",
     )

     parser.add_argument(
@@ -488,7 +488,7 @@ def parse_arguments(argv=None):
         "-b",
         "--bind",
         dest="bind",
-        help="IP to bind to when serving files via HTTP " "(default: 127.0.0.1)",
+        help="IP to bind to when serving files via HTTP (default: 127.0.0.1)",
     )

     parser.add_argument(
@@ -72,7 +72,7 @@ class Content:
         self._context = context
         self.translations = []

-        local_metadata = dict()
+        local_metadata = {}
         local_metadata.update(metadata)

         # set metadata as attributes
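Another recurring mechanical change: `dict()` calls become `{}` literals, the pattern Ruff's C408 (unnecessary-collection-call) flags now that the rule is no longer ignored (see the pyproject.toml hunk at the end). The literal avoids a global name lookup plus a call and is the idiomatic spelling; keyword arguments translate to string keys:

    empty = dict()                                   # flagged by C408
    empty = {}                                       # preferred, same value

    meta = dict(save_as="foo.jpg", url="foo.jpg")    # flagged
    meta = {"save_as": "foo.jpg", "url": "foo.jpg"}  # preferred, same dict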
@@ -357,7 +357,7 @@ class Content:
                 origin = joiner(siteurl, Author(path, self.settings).url)
             else:
                 logger.warning(
-                    "Replacement Indicator '%s' not recognized, " "skipping replacement",
+                    "Replacement Indicator '%s' not recognized, skipping replacement",
                     what,
                 )
@@ -156,7 +156,7 @@ class Generator:

         return False

-    def get_files(self, paths, exclude=[], extensions=None):
+    def get_files(self, paths, exclude=None, extensions=None):
         """Return a list of files to use, based on rules

         :param paths: the list pf paths to search (relative to self.path)
@@ -164,6 +164,8 @@ class Generator:
         :param extensions: the list of allowed extensions (if False, all
             extensions are allowed)
         """
+        if exclude is None:
+            exclude = []
         # backward compatibility for older generators
         if isinstance(paths, str):
             paths = [paths]
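The `exclude=[]` default was flagged by Ruff's B006 (mutable-argument-default), also newly un-ignored in this commit: a default list is created once at function definition and shared across every call, so any mutation leaks into later calls. The `None` sentinel plus the normalization added above is the standard fix. A self-contained illustration:

    def collect_bad(item, bucket=[]):      # one shared list for every call
        bucket.append(item)
        return bucket

    def collect_good(item, bucket=None):   # fresh list per call
        if bucket is None:
            bucket = []
        bucket.append(item)
        return bucket

    assert collect_bad(1) == [1]
    assert collect_bad(2) == [1, 2]        # surprise: state leaked between calls
    assert collect_good(1) == [1]
    assert collect_good(2) == [2]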
@@ -1068,7 +1070,7 @@ class StaticGenerator(Generator):
             except OSError as err:
                 if err.errno == errno.EXDEV:  # 18: Invalid cross-device link
                     logger.debug(
-                        "Cross-device links not valid. " "Creating symbolic links instead."
+                        "Cross-device links not valid. Creating symbolic links instead."
                     )
                     self.fallback_to_symlinks = True
                     self._link_staticfile(sc)
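For context on this hunk: `os.link` raises `OSError` with `errno.EXDEV` when source and destination live on different filesystems, since hard links cannot cross devices, and the generator then switches to symlinks. A hedged sketch of the pattern (helper name and structure assumed, not Pelican's exact API):

    import errno
    import os

    def place_static(src: str, dst: str) -> None:
        # Try a hard link first; fall back to a symbolic link across devices.
        try:
            os.link(src, dst)
        except OSError as err:
            if err.errno != errno.EXDEV:
                raise
            os.symlink(os.path.abspath(src), dst)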
@@ -131,9 +131,8 @@ class Page:
             if not self.has_next():
                 rule = p
                 break
-            else:
-                if p.min_page <= self.number:
-                    rule = p
+            elif p.min_page <= self.number:
+                rule = p

         if not rule:
             return ""
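This and several later hunks apply Ruff's PLR5501 (collapsible-else-if), removed from the ignore list by this commit: an `else:` whose entire body is a single `if` flattens into `elif`, dropping one nesting level without changing behavior. A runnable before/after:

    def step(done: bool, pending: bool) -> str:
        # Before: nested if under else (flagged by PLR5501)
        if done:
            return "finish"
        else:
            if pending:
                return "wait"
        return "idle"

    def step_flat(done: bool, pending: bool) -> str:
        # After: identical behavior, one less indentation level
        if done:
            return "finish"
        elif pending:
            return "wait"
        return "idle"

    assert all(step(d, p) == step_flat(d, p) for d in (True, False) for p in (True, False))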
@@ -199,7 +199,7 @@ class RstReader(BaseReader):
             self._language_code = lang_code
         else:
             logger.warning(
-                "Docutils has no localization for '%s'." " Using 'en' instead.",
+                "Docutils has no localization for '%s'. Using 'en' instead.",
                 lang_code,
             )
             self._language_code = "en"
@@ -320,7 +320,7 @@ class MarkdownReader(BaseReader):
         elif not DUPLICATES_DEFINITIONS_ALLOWED.get(name, True):
             if len(value) > 1:
                 logger.warning(
-                    "Duplicate definition of `%s` " "for %s. Using first one.",
+                    "Duplicate definition of `%s` for %s. Using first one.",
                     name,
                     self._source_path,
                 )
@@ -78,7 +78,7 @@ class abbreviation(nodes.Inline, nodes.TextElement):
     pass


-def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
+def abbr_role(typ, rawtext, text, lineno, inliner, options=None, content=None):
     text = utils.unescape(text)
     m = _abbr_re.search(text)
     if m is None:
@@ -447,7 +447,7 @@ def handle_deprecated_settings(settings: Settings) -> Settings:
             and not isinstance(settings[key], Path)
             and "%s" in settings[key]
         ):
-            logger.warning("%%s usage in %s is deprecated, use {lang} " "instead.", key)
+            logger.warning("%%s usage in %s is deprecated, use {lang} instead.", key)
             try:
                 settings[key] = _printf_s_to_format_field(settings[key], "lang")
             except ValueError:
@@ -470,7 +470,7 @@ def handle_deprecated_settings(settings: Settings) -> Settings:
             and not isinstance(settings[key], Path)
             and "%s" in settings[key]
         ):
-            logger.warning("%%s usage in %s is deprecated, use {slug} " "instead.", key)
+            logger.warning("%%s usage in %s is deprecated, use {slug} instead.", key)
             try:
                 settings[key] = _printf_s_to_format_field(settings[key], "slug")
             except ValueError:
@@ -614,7 +614,7 @@ def configure_settings(settings: Settings) -> Settings:
         if key in settings and not isinstance(settings[key], types):
             value = settings.pop(key)
             logger.warn(
-                "Detected misconfigured %s (%s), " "falling back to the default (%s)",
+                "Detected misconfigured %s (%s), falling back to the default (%s)",
                 key,
                 value,
                 DEFAULT_CONFIG[key],
@@ -676,7 +676,7 @@ def configure_settings(settings: Settings) -> Settings:
     if any(settings.get(k) for k in feed_keys):
         if not settings.get("SITEURL"):
             logger.warning(
-                "Feeds generated without SITEURL set properly may" " not be valid"
+                "Feeds generated without SITEURL set properly may not be valid"
             )

     if "TIMEZONE" not in settings:
@@ -314,7 +314,7 @@ class TestPage(TestBase):
         args["settings"] = settings

         # Tag
-        args["content"] = "A simple test, with a " '<a href="|tag|tagname">link</a>'
+        args["content"] = 'A simple test, with a <a href="|tag|tagname">link</a>'
         page = Page(**args)
         content = page.get_content("http://notmyidea.org")
         self.assertEqual(
@@ -326,9 +326,7 @@ class TestPage(TestBase):
         )

         # Category
-        args["content"] = (
-            "A simple test, with a " '<a href="|category|category">link</a>'
-        )
+        args["content"] = 'A simple test, with a <a href="|category|category">link</a>'
         page = Page(**args)
         content = page.get_content("http://notmyidea.org")
         self.assertEqual(
@@ -350,7 +348,7 @@ class TestPage(TestBase):

         # Classic intrasite link via filename
         args["content"] = (
-            "A simple test, with a " '<a href="|filename|article.rst">link</a>'
+            'A simple test, with a <a href="|filename|article.rst">link</a>'
         )
         content = Page(**args).get_content("http://notmyidea.org")
         self.assertEqual(
@@ -401,7 +399,7 @@ class TestPage(TestBase):

         # also test for summary in metadata
         parsed = (
-            "A simple summary test, with a " '<a href="|filename|article.rst">link</a>'
+            'A simple summary test, with a <a href="|filename|article.rst">link</a>'
         )
         linked = (
             "A simple summary test, with a "
@@ -594,7 +592,7 @@ class TestPage(TestBase):

         # An intrasite link via filename with %20 as a space
         args["content"] = (
-            "A simple test, with a " '<a href="|filename|article%20spaces.rst">link</a>'
+            'A simple test, with a <a href="|filename|article%20spaces.rst">link</a>'
         )
         content = Page(**args).get_content("http://notmyidea.org")
         self.assertEqual(
@@ -834,10 +832,10 @@ class TestStatic(LoggedTestCase):

         otherdir_settings = self.settings.copy()
         otherdir_settings.update(
-            dict(
-                PAGE_SAVE_AS=os.path.join("otherpages", "{slug}.html"),
-                PAGE_URL="otherpages/{slug}.html",
-            )
+            {
+                "PAGE_SAVE_AS": os.path.join("otherpages", "{slug}.html"),
+                "PAGE_URL": "otherpages/{slug}.html",
+            }
         )
         otherdir_page = Page(
             content="other page",
@@ -892,7 +890,7 @@ class TestStatic(LoggedTestCase):
         """
         customstatic = Static(
             content=None,
-            metadata=dict(save_as="customfoo.jpg", url="customfoo.jpg"),
+            metadata={"save_as": "customfoo.jpg", "url": "customfoo.jpg"},
             settings=self.settings,
             source_path=os.path.join("dir", "foo.jpg"),
             context=self.settings.copy(),
@@ -1066,9 +1064,9 @@ class TestStatic(LoggedTestCase):

         static = Static(
             content=None,
-            metadata=dict(
-                status="draft",
-            ),
+            metadata={
+                "status": "draft",
+            },
             settings=self.settings,
             source_path=os.path.join("dir", "foo.jpg"),
             context=self.settings.copy(),
|
|
@ -71,7 +71,7 @@ class TestBloggerXmlImporter(TestCaseWithCLocale):
|
||||||
)
|
)
|
||||||
comment_titles = {x[0] for x in test_posts if x[8] == "comment"}
|
comment_titles = {x[0] for x in test_posts if x[8] == "comment"}
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
{"Mishka, always a pleasure to read your " "adventures!..."}, comment_titles
|
{"Mishka, always a pleasure to read your adventures!..."}, comment_titles
|
||||||
)
|
)
|
||||||
|
|
||||||
def test_recognise_status_with_correct_filename(self):
|
def test_recognise_status_with_correct_filename(self):
|
||||||
|
|
@@ -478,7 +478,7 @@ class TestBuildHeader(unittest.TestCase):
             attachments=["output/test1", "output/test2"],
         )
         self.assertEqual(
-            header, ("test\n####\n" ":attachments: output/test1, " "output/test2\n\n")
+            header, ("test\n####\n:attachments: output/test1, output/test2\n\n")
         )

     def test_galleries_added_to_markdown_header(self):
@@ -521,10 +521,10 @@ class TestWordpressXMLAttachements(TestCaseWithCLocale):
                 self.assertEqual(self.attachments[post], expected)
             elif post == "with-excerpt":
                 expected_invalid = (
-                    "http://thisurlisinvalid.notarealdomain/" "not_an_image.jpg"
+                    "http://thisurlisinvalid.notarealdomain/not_an_image.jpg"
                 )
                 expected_pelikan = (
-                    "http://en.wikipedia.org/wiki/" "File:Pelikan_Walvis_Bay.jpg"
+                    "http://en.wikipedia.org/wiki/File:Pelikan_Walvis_Bay.jpg"
                 )
                 self.assertEqual(
                     self.attachments[post], {expected_invalid, expected_pelikan}
@@ -533,9 +533,7 @@ class TestWordpressXMLAttachements(TestCaseWithCLocale):
                 expected_invalid = "http://thisurlisinvalid.notarealdomain"
                 self.assertEqual(self.attachments[post], {expected_invalid})
             else:
-                self.fail(
-                    "all attachments should match to a " f"filename or None, {post}"
-                )
+                self.fail(f"all attachments should match to a filename or None, {post}")

     def test_download_attachments(self):
         real_file = os.path.join(CUR_DIR, "content/article.rst")
@@ -591,7 +591,7 @@ class MdReaderTest(ReaderTest):
                 "modified": SafeDatetime(2012, 11, 1),
                 "multiline": [
                     "Line Metadata should be handle properly.",
-                    "See syntax of Meta-Data extension of " "Python Markdown package:",
+                    "See syntax of Meta-Data extension of Python Markdown package:",
                     "If a line is indented by 4 or more spaces,",
                     "that line is assumed to be an additional line of the value",
                     "for the previous keyword.",
|
|
@ -922,14 +922,14 @@ class TestSanitisedJoin(unittest.TestCase):
|
||||||
def test_detect_parent_breakout(self):
|
def test_detect_parent_breakout(self):
|
||||||
with self.assertRaisesRegex(
|
with self.assertRaisesRegex(
|
||||||
RuntimeError,
|
RuntimeError,
|
||||||
"Attempted to break out of output directory to " "(.*?:)?/foo/test",
|
"Attempted to break out of output directory to (.*?:)?/foo/test",
|
||||||
): # (.*?:)? accounts for Windows root
|
): # (.*?:)? accounts for Windows root
|
||||||
utils.sanitised_join("/foo/bar", "../test")
|
utils.sanitised_join("/foo/bar", "../test")
|
||||||
|
|
||||||
def test_detect_root_breakout(self):
|
def test_detect_root_breakout(self):
|
||||||
with self.assertRaisesRegex(
|
with self.assertRaisesRegex(
|
||||||
RuntimeError,
|
RuntimeError,
|
||||||
"Attempted to break out of output directory to " "(.*?:)?/test",
|
"Attempted to break out of output directory to (.*?:)?/test",
|
||||||
): # (.*?:)? accounts for Windows root
|
): # (.*?:)? accounts for Windows root
|
||||||
utils.sanitised_join("/foo/bar", "/test")
|
utils.sanitised_join("/foo/bar", "/test")
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1095,7 +1095,7 @@ def fields2pelican(
|
||||||
|
|
||||||
if posts_require_pandoc:
|
if posts_require_pandoc:
|
||||||
logger.error(
|
logger.error(
|
||||||
"Pandoc must be installed to import the following posts:" "\n {}".format(
|
"Pandoc must be installed to import the following posts:\n {}".format(
|
||||||
"\n ".join(posts_require_pandoc)
|
"\n ".join(posts_require_pandoc)
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
@@ -1232,7 +1232,7 @@ def main():
        exit(error)

    if args.wp_attach and input_type != "wordpress":
-        error = "You must be importing a wordpress xml " "to use the --wp-attach option"
+        error = "You must be importing a wordpress xml to use the --wp-attach option"
        exit(error)

    if input_type == "blogger":
@@ -103,11 +103,10 @@ def ask(question, answer=str, default=None, length=None):
                break
            else:
                print("You must enter something")
+        elif length and len(r) != length:
+            print(f"Entry must be {length} characters long")
        else:
-            if length and len(r) != length:
-                print(f"Entry must be {length} characters long")
-            else:
-                break
+            break

    return r

@@ -169,7 +168,7 @@ def ask_timezone(question, default, tzurl):
            r = tz_dict[r]
            break
        else:
-            print("Please enter a valid time zone:\n" f" (check [{tzurl}])")
+            print(f"Please enter a valid time zone:\n (check [{tzurl}])")
    return r


@@ -253,7 +252,7 @@ needed by Pelican.
        default=True,
    ):
        CONF["siteurl"] = ask(
-            "What is your URL prefix? (see " "above example; no trailing slash)",
+            "What is your URL prefix? (see above example; no trailing slash)",
            str,
            CONF["siteurl"],
        )
@@ -266,7 +265,7 @@ needed by Pelican.

    if CONF["with_pagination"]:
        CONF["default_pagination"] = ask(
-            "How many articles per page " "do you want?",
+            "How many articles per page do you want?",
            int,
            CONF["default_pagination"],
        )
@@ -296,7 +295,7 @@ needed by Pelican.
            "What is your username on that server?", str, CONF["ftp_user"]
        )
        CONF["ftp_target_dir"] = ask(
-            "Where do you want to put your " "web site on that server?",
+            "Where do you want to put your web site on that server?",
            str,
            CONF["ftp_target_dir"],
        )
@@ -314,7 +313,7 @@ needed by Pelican.
            "What is your username on that server?", str, CONF["ssh_user"]
        )
        CONF["ssh_target_dir"] = ask(
-            "Where do you want to put your " "web site on that server?",
+            "Where do you want to put your web site on that server?",
            str,
            CONF["ssh_target_dir"],
        )
@@ -338,23 +337,23 @@ needed by Pelican.
        )

    if ask(
-        "Do you want to upload your website using " "Rackspace Cloud Files?",
+        "Do you want to upload your website using Rackspace Cloud Files?",
        answer=bool,
        default=False,
    ):
        CONF["cloudfiles"] = (True,)
        CONF["cloudfiles_username"] = ask(
-            "What is your Rackspace " "Cloud username?",
+            "What is your Rackspace Cloud username?",
            str,
            CONF["cloudfiles_username"],
        )
        CONF["cloudfiles_api_key"] = ask(
-            "What is your Rackspace " "Cloud API key?",
+            "What is your Rackspace Cloud API key?",
            str,
            CONF["cloudfiles_api_key"],
        )
        CONF["cloudfiles_container"] = ask(
-            "What is the name of your " "Cloud Files container?",
+            "What is the name of your Cloud Files container?",
            str,
            CONF["cloudfiles_container"],
        )
@@ -384,7 +383,7 @@ needed by Pelican.
    except OSError as e:
        print(f"Error: {e}")

-    conf_python = dict()
+    conf_python = {}
    for key, value in CONF.items():
        conf_python[key] = repr(value)
    render_jinja_template("pelicanconf.py.jinja2", conf_python, "pelicanconf.py")
@@ -115,11 +115,10 @@ class URLWrapper:
        if not isinstance(value, str):
            logger.warning("%s is set to %s", setting, value)
            return value
+        elif get_page_name:
+            return os.path.splitext(value)[0].format(**self.as_dict())
        else:
-            if get_page_name:
-                return os.path.splitext(value)[0].format(**self.as_dict())
-            else:
-                return value.format(**self.as_dict())
+            return value.format(**self.as_dict())

    page_name = property(
        functools.partial(_from_settings, key="URL", get_page_name=True)
@@ -25,9 +25,7 @@ from typing import (
    Collection,
    Generator,
    Iterable,
-    Optional,
    Sequence,
-    Union,
)

import dateutil.parser
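The rest of the hunks in this file modernize annotations per PEP 604 (Ruff's UP007, also removed from the ignore list by this commit): `Optional[X]` becomes `X | None` and `Union[X, Y]` becomes `X | Y`, which is why the `typing` imports above shrink. The `|` syntax is evaluated natively in annotations on Python 3.10+; on earlier versions it works in annotations when their evaluation is deferred with `from __future__ import annotations`. A quick sketch:

    from __future__ import annotations  # defers annotation evaluation on 3.8/3.9

    def find_slug(slug: str | None = None) -> str | None:
        # equivalent to Optional[str] without importing from typing
        if slug is None:
            return None
        return slug.lower()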
@@ -167,7 +165,7 @@ class memoized:
        self.cache[args] = value
        return value

-    def __repr__(self) -> Optional[str]:
+    def __repr__(self) -> str | None:
        return self.func.__doc__

    def __get__(self, obj: Any, objtype):
@@ -181,8 +179,8 @@ def deprecated_attribute(
    old: str,
    new: str,
    since: tuple[int, ...],
-    remove: Optional[tuple[int, ...]] = None,
-    doc: Optional[str] = None,
+    remove: tuple[int, ...] | None = None,
+    doc: str | None = None,
):
    """Attribute deprecation decorator for gentle upgrades

@@ -296,9 +294,7 @@ def slugify(
    return value.strip()


-def copy(
-    source: str, destination: str, ignores: Optional[Iterable[str]] = None
-) -> None:
+def copy(source: str, destination: str, ignores: Iterable[str] | None = None) -> None:
    """Recursively copy source into destination.

    If source is a file, destination has to be a file as well.
@@ -364,7 +360,7 @@ def copy(
        copy_file(src_path, dst_path)
    else:
        logger.warning(
-            "Skipped copy %s (not a file or " "directory) to %s",
+            "Skipped copy %s (not a file or directory) to %s",
            src_path,
            dst_path,
        )
@@ -474,7 +470,7 @@ class _HTMLWordTruncator(HTMLParser):
        self.words_found = 0
        self.open_tags = []
        self.last_word_end = None
-        self.truncate_at: Optional[int] = None
+        self.truncate_at: int | None = None

    def feed(self, *args, **kwargs) -> None:
        try:
@@ -573,11 +569,10 @@ class _HTMLWordTruncator(HTMLParser):
        if self.last_word_end is None:
            if self._word_prefix_regex.match(char):
                self.last_word_end = ref_end
+        elif self._word_regex.match(char):
+            self.last_word_end = ref_end
        else:
-            if self._word_regex.match(char):
-                self.last_word_end = ref_end
-            else:
-                self.add_last_word()
+            self.add_last_word()

    def handle_entityref(self, name: str) -> None:
        """
@@ -638,7 +633,7 @@ def truncate_html_words(s: str, num: int, end_text: str = "…") -> str:

def process_translations(
    content_list: list[Content],
-    translation_id: Optional[Union[str, Collection[str]]] = None,
+    translation_id: str | Collection[str] | None = None,
) -> tuple[list[Content], list[Content]]:
    """Finds translations and returns them.

@@ -739,7 +734,7 @@ def get_original_items(items: list[Content], with_str: str) -> list[Content]:

def order_content(
    content_list: list[Content],
-    order_by: Union[str, Callable[[Content], Any], None] = "slug",
+    order_by: str | Callable[[Content], Any] | None = "slug",
) -> list[Content]:
    """Sorts content.

@@ -841,7 +836,7 @@ def wait_for_changes(


def set_date_tzinfo(
-    d: datetime.datetime, tz_name: Optional[str] = None
+    d: datetime.datetime, tz_name: str | None = None
) -> datetime.datetime:
    """Set the timezone for dates that don't have tzinfo"""
    if tz_name and not d.tzinfo:
@@ -857,7 +852,7 @@ def mkdir_p(path: str) -> None:
    os.makedirs(path, exist_ok=True)


-def split_all(path: Union[str, pathlib.Path, None]) -> Optional[Sequence[str]]:
+def split_all(path: str | pathlib.Path | None) -> Sequence[str] | None:
    """Split a path into a list of components

    While os.path.split() splits a single component off the back of
@@ -911,7 +906,7 @@ def maybe_pluralize(count: int, singular: str, plural: str) -> str:

@contextmanager
def temporary_locale(
-    temp_locale: Optional[str] = None, lc_category: int = locale.LC_ALL
+    temp_locale: str | None = None, lc_category: int = locale.LC_ALL
) -> Generator[None, None, None]:
    """
    Enable code to run in a context with a temporary locale
@@ -21,7 +21,7 @@ logger = logging.getLogger(__name__)
class Writer:
    def __init__(self, output_path, settings=None):
        self.output_path = output_path
-        self.reminder = dict()
+        self.reminder = {}
        self.settings = settings or {}
        self._written_files = set()
        self._overridden_files = set()
|
|
@ -182,14 +182,17 @@ ignore = [
|
||||||
"INP001", # implicit-namespace-package
|
"INP001", # implicit-namespace-package
|
||||||
"RUF015", # unnecessary-iterable-allocation-for-first-element
|
"RUF015", # unnecessary-iterable-allocation-for-first-element
|
||||||
"PLR1722", # sys-exit-alias
|
"PLR1722", # sys-exit-alias
|
||||||
|
# ruff-format wants us to ignore ISC001. I don't love that, but okay.
|
||||||
|
# "warning: The following rules may cause conflicts when used with the formatter:
|
||||||
|
# `ISC001`. To avoid unexpected behavior, we recommend disabling these rules,
|
||||||
|
# either by removing them from the `select` or `extend-select` configuration,
|
||||||
|
# or adding them to the `ignore` configuration."
|
||||||
"ISC001", # single-line-implicit-string-concatenation
|
"ISC001", # single-line-implicit-string-concatenation
|
||||||
"C408", # unnecessary-collection-call
|
|
||||||
"B904", # raise-without-from-inside-except
|
"B904", # raise-without-from-inside-except
|
||||||
"UP007", # use `|` operator for union type annotations (PEP 604)
|
|
||||||
"UP031", # printf-string-formatting
|
"UP031", # printf-string-formatting
|
||||||
"PLR5501", # collapsible-else-if
|
# PERF203 has minimal performance impact, and you have to catch the exception
|
||||||
|
# inside the loop if you want to ignore it, so let's ignore PERF203.
|
||||||
"PERF203", # try-except-in-loop
|
"PERF203", # try-except-in-loop
|
||||||
"B006", # mutable-argument-default
|
|
||||||
# TODO: these only have one violation each in Dec 2023:
|
# TODO: these only have one violation each in Dec 2023:
|
||||||
"SLOT000", # no-slots-in-str-subclass
|
"SLOT000", # no-slots-in-str-subclass
|
||||||
"PYI024", # collections-named-tuple
|
"PYI024", # collections-named-tuple
|
||||||
|
|
|
||||||
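Net effect of this configuration hunk: C408, UP007, PLR5501, and B006 drop out of the ignore list, so Ruff now enforces them (their violations are the fixes throughout this commit), while ISC001 and PERF203 stay ignored with comments documenting why. With this configuration in place, running `ruff check` (optionally with `--fix`) enforces the lint rules and `ruff format` handles formatting.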