2019-11-05 23:17:19 -08:00
|
|
|
import datetime
|
2013-01-05 21:27:30 -05:00
|
|
|
import logging
|
2012-11-28 00:29:30 +01:00
|
|
|
import os
|
|
|
|
|
import re
|
2015-08-26 11:23:28 +02:00
|
|
|
from collections import OrderedDict
|
2019-11-17 19:19:37 +03:00
|
|
|
from html import escape
|
2019-11-05 23:17:19 -08:00
|
|
|
from html.parser import HTMLParser
|
|
|
|
|
from io import StringIO
|
2013-06-25 00:02:34 +02:00
|
|
|
|
2014-03-10 04:16:38 +01:00
|
|
|
import docutils
|
|
|
|
|
import docutils.core
|
|
|
|
|
import docutils.io
|
2018-11-25 22:11:05 +03:00
|
|
|
from docutils.parsers.rst.languages import get_language as get_docutils_lang
|
2017-06-06 20:34:56 +02:00
|
|
|
from docutils.writers.html4css1 import HTMLTranslator, Writer
|
2015-06-16 09:25:09 +02:00
|
|
|
|
2014-03-10 04:16:38 +01:00
|
|
|
from pelican import rstdirectives # NOQA
|
2015-06-16 09:25:09 +02:00
|
|
|
from pelican.cache import FileStampDataCacher
|
|
|
|
|
from pelican.contents import Author, Category, Page, Tag
|
2019-12-01 18:14:13 +03:00
|
|
|
from pelican.plugins import signals
|
2019-11-17 19:19:37 +03:00
|
|
|
from pelican.utils import get_date, pelican_open, posixize_path
|
2015-06-16 09:25:09 +02:00
|
|
|
|
2011-02-24 05:15:04 +00:00
|
|
|
try:
|
|
|
|
|
from markdown import Markdown
|
|
|
|
|
except ImportError:
|
2012-03-09 16:21:38 +01:00
|
|
|
Markdown = False # NOQA
|
2015-06-16 09:25:09 +02:00
|
|
|
|
|
|
|
|
# Metadata processors have no way to discard an unwanted value, so we have
# them return this value instead to signal that it should be discarded later.
# This means that _filter_discardable_metadata() must be called on processed
# metadata dicts before use, to remove the items with the special value.
_DISCARD = object()

# Metadata keys for which only a single value makes sense; when a source
# file defines one of these more than once, only the first value is kept
# (keys not listed here default to allowing duplicates).
DUPLICATES_DEFINITIONS_ALLOWED = {
    'tags': False,
    'date': False,
    'modified': False,
    'status': False,
    'category': False,
    'author': False,
    'save_as': False,
    'url': False,
    'authors': False,
    'slug': False
}

# Maps a metadata key to a callable (raw_value, settings) -> processed value.
# Processors that can reject an empty/blank value return _DISCARD instead so
# the item can be filtered out later (see _filter_discardable_metadata).
METADATA_PROCESSORS = {
    'tags': lambda x, y: ([
        Tag(tag, y)
        for tag in ensure_metadata_list(x)
    ] or _DISCARD),
    'date': lambda x, y: get_date(x.replace('_', ' ')),
    'modified': lambda x, y: get_date(x),
    'status': lambda x, y: x.strip() or _DISCARD,
    'category': lambda x, y: _process_if_nonempty(Category, x, y),
    'author': lambda x, y: _process_if_nonempty(Author, x, y),
    'authors': lambda x, y: ([
        Author(author, y)
        for author in ensure_metadata_list(x)
    ] or _DISCARD),
    'slug': lambda x, y: x.strip() or _DISCARD,
}

logger = logging.getLogger(__name__)
|
2010-10-30 00:56:40 +01:00
|
|
|
|
2015-03-21 21:54:06 -04:00
|
|
|
def ensure_metadata_list(text):
    """Canonicalize the format of a list of authors or tags.

    This works the same way as Docutils' "authors" field: if *text* is
    already a list, those boundaries are preserved; otherwise, it must be
    a string; if the string contains semicolons, it is split on
    semicolons; otherwise, it is split on commas. This allows you to
    write author lists in either "Jane Doe, John Doe" or
    "Doe, Jane; Doe, John" format.

    Regardless, all list items undergo .strip() before returning, and
    empty items are discarded.
    """
    if isinstance(text, str):
        # Semicolon takes priority so comma may appear inside a name.
        separator = ';' if ';' in text else ','
        text = text.split(separator)

    stripped = (item.strip() for item in text)
    # OrderedDict.fromkeys() removes duplicates while keeping first-seen
    # order of the remaining items.
    return list(OrderedDict.fromkeys(item for item in stripped if item))
|
Ignore empty metadata. Fixes #1469. Fixes #1398.
Some metadata values cause problems when empty. For example, a markdown file
containing a Slug: line with no additional text causing Pelican to produce a
file named ".html" instead of generating a proper file name. Others, like
those created by a PATH_METADATA regex, must be preserved even if empty,
so things like PAGE_URL="filename{customvalue}.html" will always work.
Essentially, we want to discard empty metadata that we know will be useless
or problematic. This is better than raising an exception because (a) it
allows users to deliberately keep empty metadata in their source files for
filling in later, and (b) users shouldn't be forced to fix empty metadata
created by blog migration tools (see #1398).
The metadata processors are the ideal place to do this, because they know
the type of data they are handling and whether an empty value is wanted.
Unfortunately, they can't discard items, and neither can process_metadata(),
because their return values are always saved by calling code. We can't
safely change the calling code, because some of it lives in custom reader
classes out in the field, and we don't want to break those working systems.
Discarding empty values at the time of use isn't good enough, because that
still allows useless empty values in a source file to override configured
defaults.
My solution:
- When processing a list of values, a metadata processor will omit any
unwanted empty ones from the list it returns.
- When processing an entirely unwanted value, it will return something easily
identifiable that will pass through the reader code.
- When collecting the processed metadata, read_file() will filter out items
identified as unwanted.
These metadata are affected by this change:
author, authors, category, slug, status, tags.
I also removed a bit of now-superfluous code from generators.py that was
discarding empty authors at the time of use.
2014-09-29 22:51:13 -07:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def _process_if_nonempty(processor, name, settings):
|
|
|
|
|
"""Removes extra whitespace from name and applies a metadata processor.
|
|
|
|
|
If name is empty or all whitespace, returns _DISCARD instead.
|
|
|
|
|
"""
|
|
|
|
|
name = name.strip()
|
|
|
|
|
return processor(name, settings) if name else _DISCARD
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _filter_discardable_metadata(metadata):
|
|
|
|
|
"""Return a copy of a dict, minus any items marked as discardable."""
|
|
|
|
|
return {name: val for name, val in metadata.items() if val is not _DISCARD}
|
|
|
|
|
|
|
|
|
|
|
2020-04-26 09:55:08 +02:00
|
|
|
class BaseReader:
    """Base class to read files.

    This class is used to process static files, and it can be inherited for
    other types of file. A Reader class must have the following attributes:

    - enabled: (boolean) tell if the Reader class is enabled. It
      generally depends on the import of some dependency.
    - file_extensions: a list of file extensions that the Reader will process.
    - extensions: a list of extensions to use in the reader (typical use is
      Markdown).
    """

    enabled = True
    file_extensions = ['static']
    extensions = None

    def __init__(self, settings):
        self.settings = settings

    def process_metadata(self, name, value):
        """Run the registered processor for *name* on *value*, if any.

        Values without a registered processor pass through unchanged.
        """
        processor = METADATA_PROCESSORS.get(name)
        if processor is not None:
            return processor(value, self.settings)
        return value

    def read(self, source_path):
        "No-op parser"
        return None, {}
|
|
|
|
|
|
2012-03-09 16:21:38 +01:00
|
|
|
|
2011-05-10 07:55:30 +06:00
|
|
|
class _FieldBodyTranslator(HTMLTranslator):
    """Translate a single docutils field body into an HTML fragment.

    Unlike a full document translation, the field body is emitted without
    any wrapper markup, so the result can be embedded as metadata (e.g. an
    article summary).
    """

    def __init__(self, document):
        super().__init__(document)
        # Disable docutils' compact-paragraph handling for field bodies.
        self.compact_p = None

    def visit_field_body(self, node):
        # Emit no opening markup around the field body itself.
        pass

    def depart_field_body(self, node):
        # No closing markup either; only child nodes produce output.
        pass

    def astext(self):
        """Return the accumulated HTML output as one string."""
        return ''.join(self.body)
|
|
|
|
|
|
|
|
|
|
|
2017-06-06 20:34:56 +02:00
|
|
|
def render_node_to_html(document, node, field_body_translator_class):
    """Render a docutils *node* to HTML via the given translator class.

    The translator is constructed for *document*, walked over *node*, and
    its accumulated text returned.
    """
    translator = field_body_translator_class(document)
    node.walkabout(translator)
    return translator.astext()
|
|
|
|
|
|
2012-03-09 16:21:38 +01:00
|
|
|
|
2017-06-06 20:34:56 +02:00
|
|
|
class PelicanHTMLWriter(Writer):
    """html4css1 Writer that renders through PelicanHTMLTranslator."""

    def __init__(self):
        super().__init__()
        # Writer.__init__ installs the stock translator; replace it with
        # Pelican's subclass (abbr support, empty-alt images).
        self.translator_class = PelicanHTMLTranslator
|
|
|
|
|
|
|
|
|
|
|
2012-07-17 13:30:06 +02:00
|
|
|
class PelicanHTMLTranslator(HTMLTranslator):
    """HTML translator adding abbreviation support and safer image alt text."""

    def visit_abbreviation(self, node):
        # Carry the reST explanation (if any) into the abbr title attribute.
        if node.hasattr('explanation'):
            attrs = {'title': node['explanation']}
        else:
            attrs = {}
        self.body.append(self.starttag(node, 'abbr', '', **attrs))

    def depart_abbreviation(self, node):
        self.body.append('</abbr>')

    def visit_image(self, node):
        # set an empty alt if alt is not specified
        # avoids that alt is taken from src
        node['alt'] = node.get('alt', '')
        return HTMLTranslator.visit_image(self, node)
|
|
|
|
|
|
2012-07-17 13:30:06 +02:00
|
|
|
|
2013-08-04 17:02:58 +02:00
|
|
|
class RstReader(BaseReader):
    """Reader for reStructuredText files

    By default the output HTML is written using
    docutils.writers.html4css1.Writer and translated using a subclass of
    docutils.writers.html4css1.HTMLTranslator. If you want to override it with
    your own writer/translator (e.g. a HTML5-based one), pass your classes to
    these two attributes. Look in the source code for details.

        writer_class                    Used for writing contents
        field_body_translator_class     Used for translating metadata such
                                        as article summary

    """

    enabled = bool(docutils)
    file_extensions = ['rst']

    # Override these in a subclass to customize the generated HTML.
    writer_class = PelicanHTMLWriter
    field_body_translator_class = _FieldBodyTranslator

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Validate DEFAULT_LANG against docutils' available localizations,
        # falling back to English when there is no match.
        lang_code = self.settings.get('DEFAULT_LANG', 'en')
        if get_docutils_lang(lang_code):
            self._language_code = lang_code
        else:
            logger.warning("Docutils has no localization for '%s'."
                           " Using 'en' instead.", lang_code)
            self._language_code = 'en'

    def _parse_metadata(self, document, source_path):
        """Return the dict containing document metadata"""
        formatted_fields = self.settings['FORMATTED_FIELDS']

        output = {}

        # A document without a title node usually means the source lacked a
        # single top-level section; warn, since 'title' metadata will be None.
        if document.first_child_matching_class(docutils.nodes.title) is None:
            logger.warning(
                'Document title missing in file %s: '
                'Ensure exactly one top level section',
                source_path)

        for docinfo in document.traverse(docutils.nodes.docinfo):
            for element in docinfo.children:
                if element.tagname == 'field':  # custom fields (e.g. summary)
                    name_elem, body_elem = element.children
                    name = name_elem.astext()
                    if name.lower() in formatted_fields:
                        # Render formatted fields (e.g. summary) to HTML.
                        value = render_node_to_html(
                            document, body_elem,
                            self.field_body_translator_class)
                    else:
                        value = body_elem.astext()
                elif element.tagname == 'authors':  # author list
                    name = element.tagname
                    value = [element.astext() for element in element.children]
                else:  # standard fields (e.g. address)
                    name = element.tagname
                    value = element.astext()
                name = name.lower()

                output[name] = self.process_metadata(name, value)
        return output

    def _get_publisher(self, source_path):
        """Run a docutils Publisher over *source_path* and return it.

        The returned publisher carries both the parsed document (for
        metadata extraction) and the writer parts (for rendered HTML).
        """
        extra_params = {'initial_header_level': '2',
                        'syntax_highlight': 'short',
                        'input_encoding': 'utf-8',
                        'language_code': self._language_code,
                        # halt_level 2: treat docutils warnings as fatal
                        'halt_level': 2,
                        'traceback': True,
                        'warning_stream': StringIO(),
                        'embed_stylesheet': False}
        # User-supplied DOCUTILS_SETTINGS override the defaults above.
        user_params = self.settings.get('DOCUTILS_SETTINGS')
        if user_params:
            extra_params.update(user_params)

        pub = docutils.core.Publisher(
            writer=self.writer_class(),
            destination_class=docutils.io.StringOutput)
        pub.set_components('standalone', 'restructuredtext', 'html')
        pub.process_programmatic_settings(None, extra_params, None)
        pub.set_source(source_path=source_path)
        pub.publish()
        return pub

    def read(self, source_path):
        """Parses restructured text"""
        pub = self._get_publisher(source_path)
        parts = pub.writer.parts
        content = parts.get('body')

        metadata = self._parse_metadata(pub.document, source_path)
        # The document title wins only when no explicit title metadata exists.
        metadata.setdefault('title', parts.get('title'))

        return content, metadata
|
2010-10-30 00:56:40 +01:00
|
|
|
|
2011-05-10 07:55:30 +06:00
|
|
|
|
2013-08-04 17:02:58 +02:00
|
|
|
class MarkdownReader(BaseReader):
    """Reader for Markdown files"""

    enabled = bool(Markdown)
    file_extensions = ['md', 'markdown', 'mkd', 'mdown']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        md_settings = self.settings['MARKDOWN']
        md_settings.setdefault('extension_configs', {})
        extensions = md_settings.setdefault('extensions', [])
        # Every extension that has a configuration must also be enabled.
        for configured in md_settings['extension_configs'].keys():
            if configured not in extensions:
                extensions.append(configured)
        # The meta extension is required so metadata can be extracted.
        if 'markdown.extensions.meta' not in extensions:
            extensions.append('markdown.extensions.meta')
        self._source_path = None

    def _parse_metadata(self, meta):
        """Return the dict containing document metadata"""
        formatted_fields = self.settings['FORMATTED_FIELDS']

        # prevent metadata extraction in fields
        self._md.preprocessors.deregister('meta')

        output = {}
        for raw_name, values in meta.items():
            key = raw_name.lower()
            if key in formatted_fields:
                # Formatted fields join all list values and are rendered
                # through Markdown; reset clears leftover converter state.
                self._md.reset()
                rendered = self._md.convert("\n".join(values))
                output[key] = self.process_metadata(key, rendered)
            elif not DUPLICATES_DEFINITIONS_ALLOWED.get(key, True):
                if len(values) > 1:
                    logger.warning(
                        'Duplicate definition of `%s` '
                        'for %s. Using first one.',
                        key, self._source_path)
                output[key] = self.process_metadata(key, values[0])
            elif len(values) > 1:
                # handle list metadata as list of string
                output[key] = self.process_metadata(key, values)
            else:
                # otherwise, handle metadata as single string
                output[key] = self.process_metadata(key, values[0])
        return output

    def read(self, source_path):
        """Parse content and metadata of markdown files"""
        self._source_path = source_path
        # A fresh Markdown instance per file isolates extension state.
        self._md = Markdown(**self.settings['MARKDOWN'])
        with pelican_open(source_path) as text:
            content = self._md.convert(text)

        # The meta extension stores metadata on the instance; the attribute
        # may be absent, in which case there is no metadata to parse.
        if hasattr(self._md, 'Meta'):
            metadata = self._parse_metadata(self._md.Meta)
        else:
            metadata = {}
        return content, metadata
|
2010-10-31 00:08:16 +01:00
|
|
|
|
2013-03-03 20:12:31 -08:00
|
|
|
|
2013-08-04 17:02:58 +02:00
|
|
|
class HTMLReader(BaseReader):
    """Parses HTML files as input, looking for meta, title, and body tags"""

    file_extensions = ['htm', 'html']
    enabled = True

    class _HTMLParser(HTMLParser):
        """Stateful parser that extracts <meta> metadata, <title>, and the
        raw contents of <body> from an HTML document."""

        def __init__(self, settings, filename):
            super().__init__(convert_charrefs=False)
            self.body = ''          # contents of <body>, filled on end tag
            self.metadata = {}      # name -> content from <meta> and <title>
            self.settings = settings

            # accumulates character data / rebuilt markup for title and body
            self._data_buffer = ''

            self._filename = filename

            # State flags tracking where in the document we currently are.
            self._in_top_level = True
            self._in_head = False
            self._in_title = False
            self._in_body = False
            # NOTE(review): set but never consulted in this class —
            # possibly vestigial.
            self._in_tags = False

        def handle_starttag(self, tag, attrs):
            if tag == 'head' and self._in_top_level:
                self._in_top_level = False
                self._in_head = True
            elif tag == 'title' and self._in_head:
                self._in_title = True
                self._data_buffer = ''
            elif tag == 'body' and self._in_top_level:
                self._in_top_level = False
                self._in_body = True
                self._data_buffer = ''
            elif tag == 'meta' and self._in_head:
                self._handle_meta_tag(attrs)

            elif self._in_body:
                # Inside <body>, rebuild the tag verbatim into the buffer.
                self._data_buffer += self.build_tag(tag, attrs, False)

        def handle_endtag(self, tag):
            if tag == 'head':
                if self._in_head:
                    self._in_head = False
                    self._in_top_level = True
            elif self._in_head and tag == 'title':
                self._in_title = False
                self.metadata['title'] = self._data_buffer
            elif tag == 'body':
                self.body = self._data_buffer
                self._in_body = False
                self._in_top_level = True
            elif self._in_body:
                self._data_buffer += '</{}>'.format(escape(tag))

        def handle_startendtag(self, tag, attrs):
            # Self-closing tags: <meta .../> in head, anything else in body.
            if tag == 'meta' and self._in_head:
                self._handle_meta_tag(attrs)
            if self._in_body:
                self._data_buffer += self.build_tag(tag, attrs, True)

        def handle_comment(self, data):
            self._data_buffer += '<!--{}-->'.format(data)

        def handle_data(self, data):
            self._data_buffer += data

        def handle_entityref(self, data):
            # Preserve entity references (convert_charrefs=False keeps them).
            self._data_buffer += '&{};'.format(data)

        def handle_charref(self, data):
            self._data_buffer += '&#{};'.format(data)

        def build_tag(self, tag, attrs, close_tag):
            """Re-serialize a start tag (optionally self-closing) as HTML."""
            result = '<{}'.format(escape(tag))
            for k, v in attrs:
                result += ' ' + escape(k)
                if v is not None:
                    # If the attribute value contains a double quote, surround
                    # with single quotes, otherwise use double quotes.
                    if '"' in v:
                        result += "='{}'".format(escape(v, quote=False))
                    else:
                        result += '="{}"'.format(escape(v, quote=False))
            if close_tag:
                return result + ' />'
            return result + '>'

        def _handle_meta_tag(self, attrs):
            """Store one <meta name=... content=...> pair into self.metadata."""
            name = self._attr_value(attrs, 'name')
            if name is None:
                attr_list = ['{}="{}"'.format(k, v) for k, v in attrs]
                attr_serialized = ', '.join(attr_list)
                logger.warning("Meta tag in file %s does not have a 'name' "
                               "attribute, skipping. Attributes: %s",
                               self._filename, attr_serialized)
                return
            name = name.lower()
            contents = self._attr_value(attrs, 'content', '')
            if not contents:
                # Accept the common misspelling 'contents', with a warning.
                contents = self._attr_value(attrs, 'contents', '')
                if contents:
                    logger.warning(
                        "Meta tag attribute 'contents' used in file %s, should"
                        " be changed to 'content'",
                        self._filename,
                        extra={'limit_msg': "Other files have meta tag "
                                            "attribute 'contents' that should "
                                            "be changed to 'content'"})

            # HTML 'keywords' maps onto Pelican's 'tags' metadata.
            if name == 'keywords':
                name = 'tags'

            if name in self.metadata:
                # if this metadata already exists (i.e. a previous tag with the
                # same name has already been specified then either convert to
                # list or append to list
                if isinstance(self.metadata[name], list):
                    self.metadata[name].append(contents)
                else:
                    self.metadata[name] = [self.metadata[name], contents]
            else:
                self.metadata[name] = contents

        @classmethod
        def _attr_value(cls, attrs, name, default=None):
            # attrs is a list of (key, value) pairs from HTMLParser.
            return next((x[1] for x in attrs if x[0] == name), default)

    def read(self, filename):
        """Parse content and metadata of HTML files"""
        with pelican_open(filename) as content:
            parser = self._HTMLParser(self.settings, filename)
            parser.feed(content)
            parser.close()

        metadata = {}
        for k in parser.metadata:
            metadata[k] = self.process_metadata(k, parser.metadata[k])
        return parser.body, metadata
|
2012-10-28 07:37:53 -07:00
|
|
|
|
2013-03-03 20:12:31 -08:00
|
|
|
|
2014-04-20 14:34:52 +02:00
|
|
|
class Readers(FileStampDataCacher):
|
2013-08-04 22:03:37 +02:00
|
|
|
"""Interface for all readers.
|
|
|
|
|
|
|
|
|
|
This class contains a mapping of file extensions / Reader classes, to know
|
|
|
|
|
which Reader class must be used to read a file (based on its extension).
|
|
|
|
|
This is customizable both with the 'READERS' setting, and with the
|
|
|
|
|
'readers_init' signal for plugins.
|
2012-04-10 00:40:05 -04:00
|
|
|
|
2013-08-04 22:03:37 +02:00
|
|
|
"""
|
2013-08-07 22:43:08 +02:00
|
|
|
|
2014-04-20 14:34:52 +02:00
|
|
|
def __init__(self, settings=None, cache_name=''):
|
2013-08-04 17:02:58 +02:00
|
|
|
self.settings = settings or {}
|
|
|
|
|
self.readers = {}
|
2013-08-07 00:01:12 +02:00
|
|
|
self.reader_classes = {}
|
2010-10-30 00:56:40 +01:00
|
|
|
|
2013-08-04 17:02:58 +02:00
|
|
|
for cls in [BaseReader] + BaseReader.__subclasses__():
|
2013-08-07 22:43:08 +02:00
|
|
|
if not cls.enabled:
|
2014-07-22 11:48:15 -04:00
|
|
|
logger.debug('Missing dependencies for %s',
|
|
|
|
|
', '.join(cls.file_extensions))
|
2013-08-07 22:43:08 +02:00
|
|
|
continue
|
|
|
|
|
|
2013-08-04 17:02:58 +02:00
|
|
|
for ext in cls.file_extensions:
|
2013-08-07 00:01:12 +02:00
|
|
|
self.reader_classes[ext] = cls
|
2012-03-09 16:17:09 +01:00
|
|
|
|
2013-08-04 17:02:58 +02:00
|
|
|
if self.settings['READERS']:
|
2013-08-07 00:01:12 +02:00
|
|
|
self.reader_classes.update(self.settings['READERS'])
|
2012-03-11 02:48:36 +01:00
|
|
|
|
2013-08-07 00:01:12 +02:00
|
|
|
signals.readers_init.send(self)
|
|
|
|
|
|
|
|
|
|
for fmt, reader_class in self.reader_classes.items():
|
2013-08-04 17:02:58 +02:00
|
|
|
if not reader_class:
|
|
|
|
|
continue
|
2012-03-11 02:48:36 +01:00
|
|
|
|
2013-08-04 17:02:58 +02:00
|
|
|
self.readers[fmt] = reader_class(self.settings)
|
|
|
|
|
|
2014-04-20 14:34:52 +02:00
|
|
|
# set up caching
|
|
|
|
|
cache_this_level = (cache_name != '' and
|
|
|
|
|
self.settings['CONTENT_CACHING_LAYER'] == 'reader')
|
|
|
|
|
caching_policy = cache_this_level and self.settings['CACHE_CONTENT']
|
|
|
|
|
load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE']
|
2019-11-18 20:28:48 +03:00
|
|
|
super().__init__(settings, cache_name, caching_policy, load_policy)
|
2014-04-20 14:34:52 +02:00
|
|
|
|
2013-08-04 17:02:58 +02:00
|
|
|
    @property
    def extensions(self):
        """Return the file extensions that have an instantiated reader."""
        return self.readers.keys()
|
2013-06-03 15:29:54 -04:00
|
|
|
|
2013-08-04 17:02:58 +02:00
|
|
|
def read_file(self, base_path, path, content_class=Page, fmt=None,
|
|
|
|
|
context=None, preread_signal=None, preread_sender=None,
|
|
|
|
|
context_signal=None, context_sender=None):
|
|
|
|
|
"""Return a content object parsed with the given format."""
|
2013-06-03 15:29:54 -04:00
|
|
|
|
2013-08-04 17:02:58 +02:00
|
|
|
path = os.path.abspath(os.path.join(base_path, path))
|
2015-01-02 23:45:44 -08:00
|
|
|
source_path = posixize_path(os.path.relpath(path, base_path))
|
2015-06-16 09:25:09 +02:00
|
|
|
logger.debug(
|
|
|
|
|
'Read file %s -> %s',
|
2014-07-22 11:48:15 -04:00
|
|
|
source_path, content_class.__name__)
|
2013-08-04 17:02:58 +02:00
|
|
|
|
|
|
|
|
if not fmt:
|
|
|
|
|
_, ext = os.path.splitext(os.path.basename(path))
|
|
|
|
|
fmt = ext[1:]
|
2012-03-11 02:48:36 +01:00
|
|
|
|
2013-08-04 17:02:58 +02:00
|
|
|
if fmt not in self.readers:
|
|
|
|
|
raise TypeError(
|
2014-07-22 11:48:15 -04:00
|
|
|
'Pelican does not know how to parse %s', path)
|
2013-08-04 17:02:58 +02:00
|
|
|
|
|
|
|
|
if preread_signal:
|
2015-06-16 09:25:09 +02:00
|
|
|
logger.debug(
|
|
|
|
|
'Signal %s.send(%s)',
|
2014-07-22 11:48:15 -04:00
|
|
|
preread_signal.name, preread_sender)
|
2013-08-04 17:02:58 +02:00
|
|
|
preread_signal.send(preread_sender)
|
2012-03-11 02:48:36 +01:00
|
|
|
|
2013-08-04 17:02:58 +02:00
|
|
|
reader = self.readers[fmt]
|
|
|
|
|
|
Ignore empty metadata. Fixes #1469. Fixes #1398.
Some metadata values cause problems when empty. For example, a markdown file
containing a Slug: line with no additional text causing Pelican to produce a
file named ".html" instead of generating a proper file name. Others, like
those created by a PATH_METADATA regex, must be preserved even if empty,
so things like PAGE_URL="filename{customvalue}.html" will always work.
Essentially, we want to discard empty metadata that we know will be useless
or problematic. This is better than raising an exception because (a) it
allows users to deliberately keep empty metadata in their source files for
filling in later, and (b) users shouldn't be forced to fix empty metadata
created by blog migration tools (see #1398).
The metadata processors are the ideal place to do this, because they know
the type of data they are handling and whether an empty value is wanted.
Unfortunately, they can't discard items, and neither can process_metadata(),
because their return values are always saved by calling code. We can't
safely change the calling code, because some of it lives in custom reader
classes out in the field, and we don't want to break those working systems.
Discarding empty values at the time of use isn't good enough, because that
still allows useless empty values in a source file to override configured
defaults.
My solution:
- When processing a list of values, a metadata processor will omit any
unwanted empty ones from the list it returns.
- When processing an entirely unwanted value, it will return something easily
identifiable that will pass through the reader code.
- When collecting the processed metadata, read_file() will filter out items
identified as unwanted.
These metadata are affected by this change:
author, authors, category, slug, status, tags.
I also removed a bit of now-superfluous code from generators.py that was
discarding empty authors at the time of use.
2014-09-29 22:51:13 -07:00
|
|
|
metadata = _filter_discardable_metadata(default_metadata(
|
|
|
|
|
settings=self.settings, process=reader.process_metadata))
|
2013-08-04 17:02:58 +02:00
|
|
|
metadata.update(path_metadata(
|
|
|
|
|
full_path=path, source_path=source_path,
|
|
|
|
|
settings=self.settings))
|
Ignore empty metadata. Fixes #1469. Fixes #1398.
Some metadata values cause problems when empty. For example, a markdown file
containing a Slug: line with no additional text causing Pelican to produce a
file named ".html" instead of generating a proper file name. Others, like
those created by a PATH_METADATA regex, must be preserved even if empty,
so things like PAGE_URL="filename{customvalue}.html" will always work.
Essentially, we want to discard empty metadata that we know will be useless
or problematic. This is better than raising an exception because (a) it
allows users to deliberately keep empty metadata in their source files for
filling in later, and (b) users shouldn't be forced to fix empty metadata
created by blog migration tools (see #1398).
The metadata processors are the ideal place to do this, because they know
the type of data they are handling and whether an empty value is wanted.
Unfortunately, they can't discard items, and neither can process_metadata(),
because their return values are always saved by calling code. We can't
safely change the calling code, because some of it lives in custom reader
classes out in the field, and we don't want to break those working systems.
Discarding empty values at the time of use isn't good enough, because that
still allows useless empty values in a source file to override configured
defaults.
My solution:
- When processing a list of values, a metadata processor will omit any
unwanted empty ones from the list it returns.
- When processing an entirely unwanted value, it will return something easily
identifiable that will pass through the reader code.
- When collecting the processed metadata, read_file() will filter out items
identified as unwanted.
These metadata are affected by this change:
author, authors, category, slug, status, tags.
I also removed a bit of now-superfluous code from generators.py that was
discarding empty authors at the time of use.
2014-09-29 22:51:13 -07:00
|
|
|
metadata.update(_filter_discardable_metadata(parse_path_metadata(
|
2013-08-04 17:02:58 +02:00
|
|
|
source_path=source_path, settings=self.settings,
|
Ignore empty metadata. Fixes #1469. Fixes #1398.
Some metadata values cause problems when empty. For example, a markdown file
containing a Slug: line with no additional text causes Pelican to produce a
file named ".html" instead of generating a proper file name. Others, like
those created by a PATH_METADATA regex, must be preserved even if empty,
so things like PAGE_URL="filename{customvalue}.html" will always work.
Essentially, we want to discard empty metadata that we know will be useless
or problematic. This is better than raising an exception because (a) it
allows users to deliberately keep empty metadata in their source files for
filling in later, and (b) users shouldn't be forced to fix empty metadata
created by blog migration tools (see #1398).
The metadata processors are the ideal place to do this, because they know
the type of data they are handling and whether an empty value is wanted.
Unfortunately, they can't discard items, and neither can process_metadata(),
because their return values are always saved by calling code. We can't
safely change the calling code, because some of it lives in custom reader
classes out in the field, and we don't want to break those working systems.
Discarding empty values at the time of use isn't good enough, because that
still allows useless empty values in a source file to override configured
defaults.
My solution:
- When processing a list of values, a metadata processor will omit any
unwanted empty ones from the list it returns.
- When processing an entirely unwanted value, it will return something easily
identifiable that will pass through the reader code.
- When collecting the processed metadata, read_file() will filter out items
identified as unwanted.
These metadata are affected by this change:
author, authors, category, slug, status, tags.
I also removed a bit of now-superfluous code from generators.py that was
discarding empty authors at the time of use.
2014-09-29 22:51:13 -07:00
|
|
|
process=reader.process_metadata)))
|
2014-04-21 17:29:01 +02:00
|
|
|
reader_name = reader.__class__.__name__
|
|
|
|
|
metadata['reader'] = reader_name.replace('Reader', '').lower()
|
2013-08-04 17:02:58 +02:00
|
|
|
|
2014-04-20 14:34:52 +02:00
|
|
|
content, reader_metadata = self.get_cached_data(path, (None, None))
|
|
|
|
|
if content is None:
|
|
|
|
|
content, reader_metadata = reader.read(path)
|
2021-10-06 09:19:17 +01:00
|
|
|
reader_metadata = _filter_discardable_metadata(reader_metadata)
|
2014-04-20 14:34:52 +02:00
|
|
|
self.cache_data(path, (content, reader_metadata))
|
2021-10-06 09:19:17 +01:00
|
|
|
metadata.update(reader_metadata)
|
2013-08-04 17:02:58 +02:00
|
|
|
|
|
|
|
|
if content:
|
|
|
|
|
# find images with empty alt
|
2013-08-07 00:10:26 +02:00
|
|
|
find_empty_alt(content, path)
|
2013-08-04 17:02:58 +02:00
|
|
|
|
|
|
|
|
# eventually filter the content with typogrify if asked so
|
2014-03-24 14:50:49 -04:00
|
|
|
if self.settings['TYPOGRIFY']:
|
2013-08-04 17:02:58 +02:00
|
|
|
from typogrify.filters import typogrify
|
2015-02-12 16:24:59 -08:00
|
|
|
import smartypants
|
|
|
|
|
|
2020-04-16 12:32:19 +01:00
|
|
|
typogrify_dashes = self.settings['TYPOGRIFY_DASHES']
|
|
|
|
|
if typogrify_dashes == 'oldschool':
|
|
|
|
|
smartypants.Attr.default = smartypants.Attr.set2
|
|
|
|
|
elif typogrify_dashes == 'oldschool_inverted':
|
|
|
|
|
smartypants.Attr.default = smartypants.Attr.set3
|
|
|
|
|
else:
|
|
|
|
|
smartypants.Attr.default = smartypants.Attr.set1
|
|
|
|
|
|
2015-02-12 16:24:59 -08:00
|
|
|
# Tell `smartypants` to also replace " HTML entities with
|
|
|
|
|
# smart quotes. This is necessary because Docutils has already
|
|
|
|
|
# replaced double quotes with said entities by the time we run
|
|
|
|
|
# this filter.
|
|
|
|
|
smartypants.Attr.default |= smartypants.Attr.w
|
2014-07-19 15:07:44 -07:00
|
|
|
|
|
|
|
|
def typogrify_wrapper(text):
|
|
|
|
|
"""Ensures ignore_tags feature is backward compatible"""
|
|
|
|
|
try:
|
2015-06-16 09:25:09 +02:00
|
|
|
return typogrify(
|
|
|
|
|
text,
|
|
|
|
|
self.settings['TYPOGRIFY_IGNORE_TAGS'])
|
2014-07-19 15:07:44 -07:00
|
|
|
except TypeError:
|
|
|
|
|
return typogrify(text)
|
|
|
|
|
|
2014-03-24 14:50:49 -04:00
|
|
|
if content:
|
2014-07-19 15:07:44 -07:00
|
|
|
content = typogrify_wrapper(content)
|
2016-05-28 19:31:28 +02:00
|
|
|
|
|
|
|
|
if 'title' in metadata:
|
2014-07-19 15:07:44 -07:00
|
|
|
metadata['title'] = typogrify_wrapper(metadata['title'])
|
|
|
|
|
|
2014-03-24 14:50:49 -04:00
|
|
|
if 'summary' in metadata:
|
2014-07-19 15:07:44 -07:00
|
|
|
metadata['summary'] = typogrify_wrapper(metadata['summary'])
|
2013-08-04 17:02:58 +02:00
|
|
|
|
|
|
|
|
if context_signal:
|
2015-06-16 09:25:09 +02:00
|
|
|
logger.debug(
|
|
|
|
|
'Signal %s.send(%s, <metadata>)',
|
|
|
|
|
context_signal.name,
|
|
|
|
|
context_sender)
|
2013-08-04 17:02:58 +02:00
|
|
|
context_signal.send(context_sender, metadata=metadata)
|
|
|
|
|
|
|
|
|
|
return content_class(content=content, metadata=metadata,
|
|
|
|
|
settings=self.settings, source_path=path,
|
|
|
|
|
context=context)
|
2013-01-04 14:25:12 -05:00
|
|
|
|
2013-01-04 18:14:28 -05:00
|
|
|
|
2013-08-07 00:10:26 +02:00
|
|
|
def find_empty_alt(content, path):
    """Warn about images whose ``alt`` attribute is empty.

    Scans rendered HTML for ``<img>`` tags with an empty ``alt`` value and
    emits a warning for each one (the logger's limit machinery caps the
    total), since empty alt text is a likely accessibility flaw.
    """
    empty_alt_re = re.compile(r"""
        (?:
            # src before alt
            <img
            [^\>]*
            src=(['"])(.*?)\1
            [^\>]*
            alt=(['"])\3
        )|(?:
            # alt before src
            <img
            [^\>]*
            alt=(['"])\4
            [^\>]*
            src=(['"])(.*?)\5
        )
        """, re.X)
    # findall() returns one tuple of all six groups per match; only one of
    # the two src-value groups (index 1 or index 5) is non-empty, depending
    # on attribute order, so concatenating both recovers the image source.
    for groups in empty_alt_re.findall(content):
        image_src = groups[1] + groups[5]
        logger.warning(
            'Empty alt attribute for image %s in %s',
            os.path.basename(image_src), path,
            extra={'limit_msg': 'Other images have empty alt attributes'})
|
2013-08-07 00:10:26 +02:00
|
|
|
|
|
|
|
|
|
2013-01-04 18:14:28 -05:00
|
|
|
def default_metadata(settings=None, process=None):
    """Build a metadata dict from the configured defaults.

    Reads ``DEFAULT_METADATA``, ``DEFAULT_CATEGORY`` and ``DEFAULT_DATE``
    from *settings*, optionally passing each value through *process*
    (a ``(name, value)`` callable such as a reader's ``process_metadata``).
    A ``DEFAULT_DATE`` of ``'fs'`` is ignored here — the filesystem date is
    handled by path_metadata() instead.
    """
    defaults = {}
    if not settings:
        return defaults

    def _cook(name, raw):
        # Apply the metadata processor when one was supplied.
        return process(name, raw) if process else raw

    for name, raw in dict(settings.get('DEFAULT_METADATA', {})).items():
        defaults[name] = _cook(name, raw)

    if 'DEFAULT_CATEGORY' in settings:
        defaults['category'] = _cook('category', settings['DEFAULT_CATEGORY'])

    default_date = settings.get('DEFAULT_DATE', None)
    if default_date and default_date != 'fs':
        if isinstance(default_date, str):
            defaults['date'] = get_date(default_date)
        else:
            # Non-string values are treated as datetime constructor args,
            # e.g. a (year, month, day) tuple.
            defaults['date'] = datetime.datetime(*default_date)

    return defaults
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def path_metadata(full_path, source_path, settings=None):
    """Derive metadata from a file's location on disk.

    When ``DEFAULT_DATE`` is ``'fs'``, the file's modification time becomes
    both ``date`` and ``modified``. Any matching ``EXTRA_PATH_METADATA``
    entries (for the path itself or a parent directory) are then merged in.
    """
    collected = {}
    if not settings:
        return collected

    if settings.get('DEFAULT_DATE', None) == 'fs':
        mtime = datetime.datetime.fromtimestamp(os.stat(full_path).st_mtime)
        collected['date'] = mtime
        collected['modified'] = mtime

    # EXTRA_PATH_METADATA applies to the source path and to every parent
    # directory of it. Iterating the entries in sorted order means the most
    # specific (longest) path is merged last and therefore wins conflicts.
    extra = settings.get('EXTRA_PATH_METADATA', {})
    for prefix, meta in sorted(extra.items()):
        # Compare against the prefix with a trailing slash so a file or
        # directory whose name merely starts with the same characters is
        # not treated as a match.
        as_dir = posixize_path(os.path.join(prefix, ''))
        if source_path == prefix or source_path.startswith(as_dir):
            collected.update(meta)

    return collected
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def parse_path_metadata(source_path, settings=None, process=None):
    r"""Build a metadata dict by matching configured regexes against a path.

    ``FILENAME_METADATA`` is matched against the extensionless file name,
    ``PATH_METADATA`` against the whole source path, and — when
    ``USE_FOLDER_AS_CATEGORY`` is set — the containing directory's name
    becomes the category. Earlier patterns win key conflicts, and each value
    may be passed through *process* (e.g. a reader's ``process_metadata``).

    >>> import pprint
    >>> settings = {
    ...     'FILENAME_METADATA': r'(?P<slug>[^.]*).*',
    ...     'PATH_METADATA':
    ...         r'(?P<category>[^/]*)/(?P<date>\d{4}-\d{2}-\d{2})/.*',
    ...     }
    >>> reader = BaseReader(settings=settings)
    >>> metadata = parse_path_metadata(
    ...     source_path='my-cat/2013-01-01/my-slug.html',
    ...     settings=settings,
    ...     process=reader.process_metadata)
    >>> pprint.pprint(metadata)  # doctest: +ELLIPSIS
    {'category': <pelican.urlwrappers.Category object at ...>,
     'date': datetime.datetime(2013, 1, 1, 0, 0),
     'slug': 'my-slug'}
    """
    metadata = {}
    if not settings:
        return metadata

    dirname, basename = os.path.split(source_path)
    stem = os.path.splitext(basename)[0]
    parent = os.path.basename(dirname)

    # (pattern, text) pairs, in priority order.
    candidates = [
        (settings.get('FILENAME_METADATA', None), stem),
        (settings.get('PATH_METADATA', None), source_path),
    ]
    if settings.get('USE_FOLDER_AS_CATEGORY', None):
        candidates.append(('(?P<category>.*)', parent))

    for pattern, text in candidates:
        if not (pattern and text):
            continue
        matched = re.match(pattern, text)
        if not matched:
            continue
        for key, value in matched.groupdict().items():
            key = key.lower()  # metadata keys are always lowercase
            # Earlier candidates win: never overwrite an existing key.
            if value is not None and key not in metadata:
                metadata[key] = process(key, value) if process else value

    return metadata
|