Mirror of https://github.com/getpelican/pelican.git (synced 2025-10-15 20:28:56 +02:00)
More ruff fixes in files: stop ignoring C408, UP007, PLR5501, B006
commit 7577dd7603 (parent 3624bcdbf4)
16 changed files with 72 additions and 82 deletions
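
Each rule named in the commit title maps to a small mechanical rewrite, and "stop ignoring" presumably means the four codes were dropped from the ignore list in the project's ruff configuration so they are enforced from now on. A minimal sketch of the four patterns (illustrative code, not taken from this commit):

```python
# C408: prefer a literal over a dict()/list() call.
local_metadata = {}  # instead of: local_metadata = dict()


# UP007: prefer PEP 604 unions (X | None) over typing.Optional/typing.Union.
def first_line(text: str | None) -> str | None:  # instead of: Optional[str]
    return text.splitlines()[0] if text else None


# PLR5501: an `else:` whose only statement is an `if` collapses into `elif`.
def bucket(n: int) -> str:
    if n > 10:
        return "large"
    elif n > 0:  # instead of: else: if n > 0: ...
        return "small"
    else:
        return "empty"


# B006: never use a mutable default argument; use None as a sentinel.
def get_files(paths, exclude=None):  # instead of: exclude=[]
    if exclude is None:
        exclude = []
    return [p for p in paths if p not in exclude]
```

The rest of the churn below merges adjacent string literals ("a " "b" becomes "a b"), which changes nothing at runtime; it only tidies lines that earlier re-wrapping had split in two.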
@@ -417,7 +417,7 @@ def parse_arguments(argv=None):
         "--relative-urls",
         dest="relative_paths",
         action="store_true",
-        help="Use relative urls in output, " "useful for site development",
+        help="Use relative urls in output, useful for site development",
     )

     parser.add_argument(
@@ -433,7 +433,7 @@ def parse_arguments(argv=None):
         "--ignore-cache",
         action="store_true",
         dest="ignore_cache",
-        help="Ignore content cache " "from previous runs by not loading cache files.",
+        help="Ignore content cache from previous runs by not loading cache files.",
     )

     parser.add_argument(
@@ -488,7 +488,7 @@ def parse_arguments(argv=None):
         "-b",
         "--bind",
         dest="bind",
-        help="IP to bind to when serving files via HTTP " "(default: 127.0.0.1)",
+        help="IP to bind to when serving files via HTTP (default: 127.0.0.1)",
     )

     parser.add_argument(
@@ -72,7 +72,7 @@ class Content:
         self._context = context
         self.translations = []

-        local_metadata = dict()
+        local_metadata = {}
         local_metadata.update(metadata)

         # set metadata as attributes
@@ -357,7 +357,7 @@ class Content:
                 origin = joiner(siteurl, Author(path, self.settings).url)
             else:
                 logger.warning(
-                    "Replacement Indicator '%s' not recognized, " "skipping replacement",
+                    "Replacement Indicator '%s' not recognized, skipping replacement",
                     what,
                 )

@@ -156,7 +156,7 @@ class Generator:

         return False

-    def get_files(self, paths, exclude=[], extensions=None):
+    def get_files(self, paths, exclude=None, extensions=None):
         """Return a list of files to use, based on rules

         :param paths: the list pf paths to search (relative to self.path)
@@ -164,6 +164,8 @@ class Generator:
         :param extensions: the list of allowed extensions (if False, all
             extensions are allowed)
         """
+        if exclude is None:
+            exclude = []
         # backward compatibility for older generators
         if isinstance(paths, str):
             paths = [paths]
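
The `get_files` hunk above is the B006 fix proper: a default like `exclude=[]` is evaluated once, when the function is defined, and the very same list object is then shared by every call. As far as this diff shows, `exclude` is only read, so the change is preventive hygiene, but the failure mode it guards against is easy to demonstrate (illustrative code, not from Pelican):

```python
def broken(item, bucket=[]):  # B006: one list object shared by every call
    bucket.append(item)
    return bucket


def fixed(item, bucket=None):
    if bucket is None:  # a fresh list is created on each call
        bucket = []
    bucket.append(item)
    return bucket


print(broken(1), broken(2))  # [1, 2] [1, 2] -- both calls returned the same list
print(fixed(1), fixed(2))    # [1] [2]
```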
@@ -1068,7 +1070,7 @@ class StaticGenerator(Generator):
         except OSError as err:
             if err.errno == errno.EXDEV:  # 18: Invalid cross-device link
                 logger.debug(
-                    "Cross-device links not valid. " "Creating symbolic links instead."
+                    "Cross-device links not valid. Creating symbolic links instead."
                 )
                 self.fallback_to_symlinks = True
                 self._link_staticfile(sc)
@@ -131,9 +131,8 @@ class Page:
             if not self.has_next():
                 rule = p
                 break
-            else:
-                if p.min_page <= self.number:
-                    rule = p
+            elif p.min_page <= self.number:
+                rule = p

         if not rule:
             return ""
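
This is the PLR5501 shape at its simplest: the removed `else:` wrapped nothing but an `if`, so the pair collapses into a single `elif` and the body loses one indent level; evaluation order is unchanged. The same collapse recurs below in `ask()`, `URLWrapper`, and `_HTMLWordTruncator`.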
@@ -199,7 +199,7 @@ class RstReader(BaseReader):
             self._language_code = lang_code
         else:
             logger.warning(
-                "Docutils has no localization for '%s'." " Using 'en' instead.",
+                "Docutils has no localization for '%s'. Using 'en' instead.",
                 lang_code,
             )
             self._language_code = "en"
@@ -320,7 +320,7 @@ class MarkdownReader(BaseReader):
         elif not DUPLICATES_DEFINITIONS_ALLOWED.get(name, True):
             if len(value) > 1:
                 logger.warning(
-                    "Duplicate definition of `%s` " "for %s. Using first one.",
+                    "Duplicate definition of `%s` for %s. Using first one.",
                     name,
                     self._source_path,
                 )
@@ -78,7 +78,7 @@ class abbreviation(nodes.Inline, nodes.TextElement):
     pass


-def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
+def abbr_role(typ, rawtext, text, lineno, inliner, options=None, content=None):
     text = utils.unescape(text)
     m = _abbr_re.search(text)
     if m is None:
@@ -447,7 +447,7 @@ def handle_deprecated_settings(settings: Settings) -> Settings:
             and not isinstance(settings[key], Path)
             and "%s" in settings[key]
         ):
-            logger.warning("%%s usage in %s is deprecated, use {lang} " "instead.", key)
+            logger.warning("%%s usage in %s is deprecated, use {lang} instead.", key)
             try:
                 settings[key] = _printf_s_to_format_field(settings[key], "lang")
             except ValueError:
@@ -470,7 +470,7 @@ def handle_deprecated_settings(settings: Settings) -> Settings:
             and not isinstance(settings[key], Path)
             and "%s" in settings[key]
         ):
-            logger.warning("%%s usage in %s is deprecated, use {slug} " "instead.", key)
+            logger.warning("%%s usage in %s is deprecated, use {slug} instead.", key)
             try:
                 settings[key] = _printf_s_to_format_field(settings[key], "slug")
             except ValueError:
@@ -614,7 +614,7 @@ def configure_settings(settings: Settings) -> Settings:
         if key in settings and not isinstance(settings[key], types):
             value = settings.pop(key)
             logger.warn(
-                "Detected misconfigured %s (%s), " "falling back to the default (%s)",
+                "Detected misconfigured %s (%s), falling back to the default (%s)",
                 key,
                 value,
                 DEFAULT_CONFIG[key],
@@ -676,7 +676,7 @@ def configure_settings(settings: Settings) -> Settings:
     if any(settings.get(k) for k in feed_keys):
         if not settings.get("SITEURL"):
             logger.warning(
-                "Feeds generated without SITEURL set properly may" " not be valid"
+                "Feeds generated without SITEURL set properly may not be valid"
             )

     if "TIMEZONE" not in settings:
@@ -314,7 +314,7 @@ class TestPage(TestBase):
         args["settings"] = settings

         # Tag
-        args["content"] = "A simple test, with a " '<a href="|tag|tagname">link</a>'
+        args["content"] = 'A simple test, with a <a href="|tag|tagname">link</a>'
         page = Page(**args)
         content = page.get_content("http://notmyidea.org")
         self.assertEqual(
@@ -326,9 +326,7 @@ class TestPage(TestBase):
         )

         # Category
-        args["content"] = (
-            "A simple test, with a " '<a href="|category|category">link</a>'
-        )
+        args["content"] = 'A simple test, with a <a href="|category|category">link</a>'
         page = Page(**args)
         content = page.get_content("http://notmyidea.org")
         self.assertEqual(
@@ -350,7 +348,7 @@ class TestPage(TestBase):

         # Classic intrasite link via filename
         args["content"] = (
-            "A simple test, with a " '<a href="|filename|article.rst">link</a>'
+            'A simple test, with a <a href="|filename|article.rst">link</a>'
         )
         content = Page(**args).get_content("http://notmyidea.org")
         self.assertEqual(
@@ -401,7 +399,7 @@ class TestPage(TestBase):

         # also test for summary in metadata
         parsed = (
-            "A simple summary test, with a " '<a href="|filename|article.rst">link</a>'
+            'A simple summary test, with a <a href="|filename|article.rst">link</a>'
         )
         linked = (
             "A simple summary test, with a "
@@ -594,7 +592,7 @@ class TestPage(TestBase):

         # An intrasite link via filename with %20 as a space
         args["content"] = (
-            "A simple test, with a " '<a href="|filename|article%20spaces.rst">link</a>'
+            'A simple test, with a <a href="|filename|article%20spaces.rst">link</a>'
         )
         content = Page(**args).get_content("http://notmyidea.org")
         self.assertEqual(
@@ -834,10 +832,10 @@ class TestStatic(LoggedTestCase):

         otherdir_settings = self.settings.copy()
         otherdir_settings.update(
-            dict(
-                PAGE_SAVE_AS=os.path.join("otherpages", "{slug}.html"),
-                PAGE_URL="otherpages/{slug}.html",
-            )
+            {
+                "PAGE_SAVE_AS": os.path.join("otherpages", "{slug}.html"),
+                "PAGE_URL": "otherpages/{slug}.html",
+            }
         )
         otherdir_page = Page(
             content="other page",
@@ -892,7 +890,7 @@ class TestStatic(LoggedTestCase):
         """
         customstatic = Static(
             content=None,
-            metadata=dict(save_as="customfoo.jpg", url="customfoo.jpg"),
+            metadata={"save_as": "customfoo.jpg", "url": "customfoo.jpg"},
             settings=self.settings,
             source_path=os.path.join("dir", "foo.jpg"),
             context=self.settings.copy(),
@@ -1066,9 +1064,9 @@ class TestStatic(LoggedTestCase):

         static = Static(
             content=None,
-            metadata=dict(
-                status="draft",
-            ),
+            metadata={
+                "status": "draft",
+            },
             settings=self.settings,
             source_path=os.path.join("dir", "foo.jpg"),
             context=self.settings.copy(),
@@ -71,7 +71,7 @@ class TestBloggerXmlImporter(TestCaseWithCLocale):
         )
         comment_titles = {x[0] for x in test_posts if x[8] == "comment"}
         self.assertEqual(
-            {"Mishka, always a pleasure to read your " "adventures!..."}, comment_titles
+            {"Mishka, always a pleasure to read your adventures!..."}, comment_titles
         )

     def test_recognise_status_with_correct_filename(self):
@@ -478,7 +478,7 @@ class TestBuildHeader(unittest.TestCase):
             attachments=["output/test1", "output/test2"],
         )
         self.assertEqual(
-            header, ("test\n####\n" ":attachments: output/test1, " "output/test2\n\n")
+            header, ("test\n####\n:attachments: output/test1, output/test2\n\n")
         )

     def test_galleries_added_to_markdown_header(self):
@@ -521,10 +521,10 @@ class TestWordpressXMLAttachements(TestCaseWithCLocale):
                 self.assertEqual(self.attachments[post], expected)
             elif post == "with-excerpt":
                 expected_invalid = (
-                    "http://thisurlisinvalid.notarealdomain/" "not_an_image.jpg"
+                    "http://thisurlisinvalid.notarealdomain/not_an_image.jpg"
                 )
                 expected_pelikan = (
-                    "http://en.wikipedia.org/wiki/" "File:Pelikan_Walvis_Bay.jpg"
+                    "http://en.wikipedia.org/wiki/File:Pelikan_Walvis_Bay.jpg"
                 )
                 self.assertEqual(
                     self.attachments[post], {expected_invalid, expected_pelikan}
@@ -533,9 +533,7 @@ class TestWordpressXMLAttachements(TestCaseWithCLocale):
                 expected_invalid = "http://thisurlisinvalid.notarealdomain"
                 self.assertEqual(self.attachments[post], {expected_invalid})
             else:
-                self.fail(
-                    "all attachments should match to a " f"filename or None, {post}"
-                )
+                self.fail(f"all attachments should match to a filename or None, {post}")

     def test_download_attachments(self):
         real_file = os.path.join(CUR_DIR, "content/article.rst")
@@ -591,7 +591,7 @@ class MdReaderTest(ReaderTest):
                 "modified": SafeDatetime(2012, 11, 1),
                 "multiline": [
                     "Line Metadata should be handle properly.",
-                    "See syntax of Meta-Data extension of " "Python Markdown package:",
+                    "See syntax of Meta-Data extension of Python Markdown package:",
                     "If a line is indented by 4 or more spaces,",
                     "that line is assumed to be an additional line of the value",
                     "for the previous keyword.",
@@ -922,14 +922,14 @@ class TestSanitisedJoin(unittest.TestCase):
     def test_detect_parent_breakout(self):
         with self.assertRaisesRegex(
             RuntimeError,
-            "Attempted to break out of output directory to " "(.*?:)?/foo/test",
+            "Attempted to break out of output directory to (.*?:)?/foo/test",
         ):  # (.*?:)? accounts for Windows root
             utils.sanitised_join("/foo/bar", "../test")

     def test_detect_root_breakout(self):
         with self.assertRaisesRegex(
             RuntimeError,
-            "Attempted to break out of output directory to " "(.*?:)?/test",
+            "Attempted to break out of output directory to (.*?:)?/test",
         ):  # (.*?:)? accounts for Windows root
             utils.sanitised_join("/foo/bar", "/test")

@@ -1095,7 +1095,7 @@ def fields2pelican(

     if posts_require_pandoc:
         logger.error(
-            "Pandoc must be installed to import the following posts:" "\n {}".format(
+            "Pandoc must be installed to import the following posts:\n {}".format(
                 "\n ".join(posts_require_pandoc)
             )
         )
@@ -1232,7 +1232,7 @@ def main():
         exit(error)

     if args.wp_attach and input_type != "wordpress":
-        error = "You must be importing a wordpress xml " "to use the --wp-attach option"
+        error = "You must be importing a wordpress xml to use the --wp-attach option"
         exit(error)

     if input_type == "blogger":
@@ -103,11 +103,10 @@ def ask(question, answer=str, default=None, length=None):
                     break
                 else:
                     print("You must enter something")
-            else:
-                if length and len(r) != length:
-                    print(f"Entry must be {length} characters long")
-                else:
-                    break
+            elif length and len(r) != length:
+                print(f"Entry must be {length} characters long")
+            else:
+                break

     return r

@@ -169,7 +168,7 @@ def ask_timezone(question, default, tzurl):
             r = tz_dict[r]
             break
         else:
-            print("Please enter a valid time zone:\n" f" (check [{tzurl}])")
+            print(f"Please enter a valid time zone:\n (check [{tzurl}])")
     return r


@@ -253,7 +252,7 @@ needed by Pelican.
         default=True,
     ):
         CONF["siteurl"] = ask(
-            "What is your URL prefix? (see " "above example; no trailing slash)",
+            "What is your URL prefix? (see above example; no trailing slash)",
             str,
             CONF["siteurl"],
         )
@@ -266,7 +265,7 @@ needed by Pelican.

     if CONF["with_pagination"]:
         CONF["default_pagination"] = ask(
-            "How many articles per page " "do you want?",
+            "How many articles per page do you want?",
             int,
             CONF["default_pagination"],
         )
@@ -296,7 +295,7 @@ needed by Pelican.
             "What is your username on that server?", str, CONF["ftp_user"]
         )
         CONF["ftp_target_dir"] = ask(
-            "Where do you want to put your " "web site on that server?",
+            "Where do you want to put your web site on that server?",
             str,
             CONF["ftp_target_dir"],
         )
@@ -314,7 +313,7 @@ needed by Pelican.
             "What is your username on that server?", str, CONF["ssh_user"]
         )
         CONF["ssh_target_dir"] = ask(
-            "Where do you want to put your " "web site on that server?",
+            "Where do you want to put your web site on that server?",
             str,
             CONF["ssh_target_dir"],
         )
@@ -338,23 +337,23 @@ needed by Pelican.
     )

     if ask(
-        "Do you want to upload your website using " "Rackspace Cloud Files?",
+        "Do you want to upload your website using Rackspace Cloud Files?",
         answer=bool,
         default=False,
     ):
         CONF["cloudfiles"] = (True,)
         CONF["cloudfiles_username"] = ask(
-            "What is your Rackspace " "Cloud username?",
+            "What is your Rackspace Cloud username?",
             str,
             CONF["cloudfiles_username"],
         )
         CONF["cloudfiles_api_key"] = ask(
-            "What is your Rackspace " "Cloud API key?",
+            "What is your Rackspace Cloud API key?",
             str,
             CONF["cloudfiles_api_key"],
         )
         CONF["cloudfiles_container"] = ask(
-            "What is the name of your " "Cloud Files container?",
+            "What is the name of your Cloud Files container?",
             str,
             CONF["cloudfiles_container"],
         )
@@ -384,7 +383,7 @@ needed by Pelican.
     except OSError as e:
         print(f"Error: {e}")

-    conf_python = dict()
+    conf_python = {}
     for key, value in CONF.items():
         conf_python[key] = repr(value)
     render_jinja_template("pelicanconf.py.jinja2", conf_python, "pelicanconf.py")
@@ -115,11 +115,10 @@ class URLWrapper:
         if not isinstance(value, str):
             logger.warning("%s is set to %s", setting, value)
             return value
-        else:
-            if get_page_name:
-                return os.path.splitext(value)[0].format(**self.as_dict())
-            else:
-                return value.format(**self.as_dict())
+        elif get_page_name:
+            return os.path.splitext(value)[0].format(**self.as_dict())
+        else:
+            return value.format(**self.as_dict())

     page_name = property(
         functools.partial(_from_settings, key="URL", get_page_name=True)
@@ -25,9 +25,7 @@ from typing import (
     Collection,
     Generator,
     Iterable,
-    Optional,
     Sequence,
-    Union,
 )

 import dateutil.parser
@@ -167,7 +165,7 @@ class memoized:
         self.cache[args] = value
         return value

-    def __repr__(self) -> Optional[str]:
+    def __repr__(self) -> str | None:
         return self.func.__doc__

     def __get__(self, obj: Any, objtype):
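
From here on most hunks are UP007 rewrites: `Optional[X]` becomes `X | None` and `Union[A, B]` becomes `A | B`, which is why the `Optional` and `Union` imports were dropped above. Evaluated at runtime, the `|` spelling needs Python 3.10+, and ruff only applies UP007 when the target version is 3.10+ or the module uses `from __future__ import annotations`, so presumably one of those holds here. A sketch of the new spelling, with the function body invented for illustration:

```python
from __future__ import annotations  # lets pre-3.10 interpreters accept X | Y annotations

import pathlib
from typing import Sequence


def split_all(path: str | pathlib.Path | None) -> Sequence[str] | None:
    # UP007 spelling of: Union[str, pathlib.Path, None] -> Optional[Sequence[str]]
    if path is None:
        return None
    return pathlib.PurePath(path).parts
```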
@@ -181,8 +179,8 @@ def deprecated_attribute(
     old: str,
     new: str,
     since: tuple[int, ...],
-    remove: Optional[tuple[int, ...]] = None,
-    doc: Optional[str] = None,
+    remove: tuple[int, ...] | None = None,
+    doc: str | None = None,
 ):
     """Attribute deprecation decorator for gentle upgrades

@@ -296,9 +294,7 @@ def slugify(
     return value.strip()


-def copy(
-    source: str, destination: str, ignores: Optional[Iterable[str]] = None
-) -> None:
+def copy(source: str, destination: str, ignores: Iterable[str] | None = None) -> None:
     """Recursively copy source into destination.

     If source is a file, destination has to be a file as well.
@@ -364,7 +360,7 @@ def copy(
             copy_file(src_path, dst_path)
         else:
             logger.warning(
-                "Skipped copy %s (not a file or " "directory) to %s",
+                "Skipped copy %s (not a file or directory) to %s",
                 src_path,
                 dst_path,
             )
@@ -474,7 +470,7 @@ class _HTMLWordTruncator(HTMLParser):
         self.words_found = 0
         self.open_tags = []
         self.last_word_end = None
-        self.truncate_at: Optional[int] = None
+        self.truncate_at: int | None = None

     def feed(self, *args, **kwargs) -> None:
         try:
@@ -573,11 +569,10 @@ class _HTMLWordTruncator(HTMLParser):
         if self.last_word_end is None:
             if self._word_prefix_regex.match(char):
                 self.last_word_end = ref_end
-        else:
-            if self._word_regex.match(char):
-                self.last_word_end = ref_end
-            else:
-                self.add_last_word()
+        elif self._word_regex.match(char):
+            self.last_word_end = ref_end
+        else:
+            self.add_last_word()

     def handle_entityref(self, name: str) -> None:
         """
@@ -638,7 +633,7 @@ def truncate_html_words(s: str, num: int, end_text: str = "…") -> str:

 def process_translations(
     content_list: list[Content],
-    translation_id: Optional[Union[str, Collection[str]]] = None,
+    translation_id: str | Collection[str] | None = None,
 ) -> tuple[list[Content], list[Content]]:
     """Finds translations and returns them.

@@ -739,7 +734,7 @@ def get_original_items(items: list[Content], with_str: str) -> list[Content]:

 def order_content(
     content_list: list[Content],
-    order_by: Union[str, Callable[[Content], Any], None] = "slug",
+    order_by: str | Callable[[Content], Any] | None = "slug",
 ) -> list[Content]:
     """Sorts content.

@@ -841,7 +836,7 @@ def wait_for_changes(


 def set_date_tzinfo(
-    d: datetime.datetime, tz_name: Optional[str] = None
+    d: datetime.datetime, tz_name: str | None = None
 ) -> datetime.datetime:
     """Set the timezone for dates that don't have tzinfo"""
     if tz_name and not d.tzinfo:
@@ -857,7 +852,7 @@ def mkdir_p(path: str) -> None:
     os.makedirs(path, exist_ok=True)


-def split_all(path: Union[str, pathlib.Path, None]) -> Optional[Sequence[str]]:
+def split_all(path: str | pathlib.Path | None) -> Sequence[str] | None:
     """Split a path into a list of components

     While os.path.split() splits a single component off the back of
@@ -911,7 +906,7 @@ def maybe_pluralize(count: int, singular: str, plural: str) -> str:

 @contextmanager
 def temporary_locale(
-    temp_locale: Optional[str] = None, lc_category: int = locale.LC_ALL
+    temp_locale: str | None = None, lc_category: int = locale.LC_ALL
 ) -> Generator[None, None, None]:
     """
     Enable code to run in a context with a temporary locale
@@ -21,7 +21,7 @@ logger = logging.getLogger(__name__)
 class Writer:
     def __init__(self, output_path, settings=None):
         self.output_path = output_path
-        self.reminder = dict()
+        self.reminder = {}
         self.settings = settings or {}
         self._written_files = set()
         self._overridden_files = set()