
Ignore empty metadata. Fixes #1469. Fixes #1398.

Some metadata values cause problems when empty.  For example, a markdown file
containing a Slug: line with no additional text causes Pelican to produce a
file named ".html" instead of a properly named one.  Others, like
those created by a PATH_METADATA regex, must be preserved even if empty,
so things like PAGE_URL="filename{customvalue}.html" will always work.
Essentially, we want to discard empty metadata that we know will be useless
or problematic.  This is better than raising an exception because (a) it
allows users to deliberately keep empty metadata in their source files for
filling in later, and (b) users shouldn't be forced to fix empty metadata
created by blog migration tools (see #1398).
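
To make the two cases concrete, here is a hypothetical configuration sketch
(the regex, group name, and templates are invented for illustration and are
not taken from any real site):

    # A markdown source with an empty Slug: line, e.g.
    #
    #     Title: My Post
    #     Slug:
    #
    # must not yield slug == '', or the writer produces ".html".
    #
    # By contrast, a PATH_METADATA capture group may legitimately match the
    # empty string, yet its key must survive so URL templates can format it:
    PATH_METADATA = r'(?P<customvalue>\d*)-?(?P<slug>[^/]+)\.md'
    PAGE_URL = 'filename{customvalue}.html'
    PAGE_SAVE_AS = 'filename{customvalue}.html'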

The metadata processors are the ideal place to do this, because they know
the type of data they are handling and whether an empty value is wanted.
Unfortunately, they can't discard items, and neither can process_metadata(),
because their return values are always saved by calling code.  We can't
safely change the calling code, because some of it lives in custom reader
classes out in the field, and we don't want to break those working systems.
Discarding empty values at the time of use isn't good enough, because that
still allows useless empty values in a source file to override configured
defaults.
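
Roughly, the calling code looks like this (a simplified sketch, not the exact
Pelican source; HypotheticalReader and its _parse_metadata method are
placeholders standing in for the built-in and custom readers):

    from pelican.readers import BaseReader

    class HypotheticalReader(BaseReader):
        def _parse_metadata(self, raw_metadata):
            metadata = {}
            for name, value in raw_metadata.items():
                name = name.lower()
                # The processor's return value is always stored; nothing in
                # this loop (or in the custom readers out in the field that
                # follow the same pattern) can skip a key, so returning ''
                # or None from a processor would still plant an empty item
                # that later shadows defaults via metadata.update().
                metadata[name] = self.process_metadata(name, value)
            return metadata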

My solution:
- When processing a list of values, a metadata processor will omit any
  unwanted empty ones from the list it returns.
- When processing an entirely unwanted value, it will return something easily
  identifiable that will pass through the reader code.
- When collecting the processed metadata, read_file() will filter out items
  identified as unwanted.

These metadata are affected by this change:
author, authors, category, slug, status, tags.
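
For illustration, a minimal sketch of the resulting behaviour, assuming the
helpers end up in pelican.readers as shown in the diff below (the settings
dict here is a stand-in; these two processors ignore it):

    from pelican.readers import METADATA_PROCESSORS, _filter_discardable_metadata

    settings = {}
    processed = {
        # an empty "Slug:" line becomes the sentinel instead of ''
        'slug': METADATA_PROCESSORS['slug']('', settings),
        # a real value passes through unchanged
        'status': METADATA_PROCESSORS['status']('draft', settings),
    }

    # read_file() applies this filter, so the empty slug never reaches the
    # writers and cannot shadow a configured or URL-derived default.
    assert _filter_discardable_metadata(processed) == {'status': 'draft'}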

I also removed a bit of now-superfluous code from generators.py that was
discarding empty authors at the time of use.
commit db2e517450
Author: Forest
Date:   2014-09-29 22:51:13 -07:00
2 changed files with 39 additions and 13 deletions

pelican/generators.py

@@ -544,10 +544,8 @@ class ArticlesGenerator(CachingGenerator):
             if hasattr(article, 'tags'):
                 for tag in article.tags:
                     self.tags[tag].append(article)
-            # ignore blank authors as well as undefined
             for author in getattr(article, 'authors', []):
-                if author.name != '':
-                    self.authors[author].append(article)
+                self.authors[author].append(article)
         # sort the articles by date
         self.articles.sort(key=attrgetter('date'), reverse=True)
         self.dates = list(self.articles)

pelican/readers.py

@@ -28,16 +28,44 @@ from pelican.contents import Page, Category, Tag, Author
 from pelican.utils import get_date, pelican_open, FileStampDataCacher, SafeDatetime, posixize_path
 
 
+def strip_split(text, sep=','):
+    """Return a list of stripped, non-empty substrings, delimited by sep."""
+    items = [x.strip() for x in text.split(sep)]
+    return [x for x in items if x]
+
+
+# Metadata processors have no way to discard an unwanted value, so we have
+# them return this value instead to signal that it should be discarded later.
+# This means that _filter_discardable_metadata() must be called on processed
+# metadata dicts before use, to remove the items with the special value.
+_DISCARD = object()
+
+
+def _process_if_nonempty(processor, name, settings):
+    """Removes extra whitespace from name and applies a metadata processor.
+    If name is empty or all whitespace, returns _DISCARD instead.
+    """
+    name = name.strip()
+    return processor(name, settings) if name else _DISCARD
+
+
 METADATA_PROCESSORS = {
-    'tags': lambda x, y: [Tag(tag, y) for tag in x.split(',')],
+    'tags': lambda x, y: [Tag(tag, y) for tag in strip_split(x)] or _DISCARD,
     'date': lambda x, y: get_date(x.replace('_', ' ')),
     'modified': lambda x, y: get_date(x),
-    'status': lambda x, y: x.strip(),
-    'category': Category,
-    'author': Author,
-    'authors': lambda x, y: [Author(author.strip(), y) for author in x.split(',')],
+    'status': lambda x, y: x.strip() or _DISCARD,
+    'category': lambda x, y: _process_if_nonempty(Category, x, y),
+    'author': lambda x, y: _process_if_nonempty(Author, x, y),
+    'authors': lambda x, y: [Author(a, y) for a in strip_split(x)] or _DISCARD,
+    'slug': lambda x, y: x.strip() or _DISCARD,
 }
 
 
+def _filter_discardable_metadata(metadata):
+    """Return a copy of a dict, minus any items marked as discardable."""
+    return {name: val for name, val in metadata.items() if val is not _DISCARD}
+
+
 logger = logging.getLogger(__name__)
 
 
 class BaseReader(object):
@@ -447,14 +475,14 @@ class Readers(FileStampDataCacher):
 
         reader = self.readers[fmt]
 
-        metadata = default_metadata(
-            settings=self.settings, process=reader.process_metadata)
+        metadata = _filter_discardable_metadata(default_metadata(
+            settings=self.settings, process=reader.process_metadata))
         metadata.update(path_metadata(
             full_path=path, source_path=source_path,
             settings=self.settings))
-        metadata.update(parse_path_metadata(
-            source_path=source_path, settings=self.settings,
-            process=reader.process_metadata))
+        metadata.update(_filter_discardable_metadata(parse_path_metadata(
+            source_path=source_path, settings=self.settings,
+            process=reader.process_metadata)))
 
         reader_name = reader.__class__.__name__
         metadata['reader'] = reader_name.replace('Reader', '').lower()
@@ -462,7 +490,7 @@ class Readers(FileStampDataCacher):
         if content is None:
             content, reader_metadata = reader.read(path)
             self.cache_data(path, (content, reader_metadata))
-        metadata.update(reader_metadata)
+        metadata.update(_filter_discardable_metadata(reader_metadata))
 
         if content:
             # find images with empty alt