Mirror of https://github.com/getpelican/pelican.git
Merge branch 'master' of https://github.com/svetlyak40wt/pelican

Conflicts:
	pelican/generators.py

commit 8f59649eb6

6 changed files with 108 additions and 25 deletions
.gitignore vendored Normal file
@@ -0,0 +1,4 @@
+*.egg-info
+.*.swp
+.*.swo
+*.pyc
pelican/contents.py
@@ -12,6 +12,8 @@ class Page(object):
 
     def __init__(self, content, metadatas={}, settings={}, filename=None):
         self.content = content
+        self.translations = []
+
         self.status = "published"  # default value
         for key, value in metadatas.items():
             setattr(self, key, value)
@@ -20,6 +22,28 @@ class Page(object):
         if 'AUTHOR' in settings:
             self.author = settings['AUTHOR']
 
+        default_lang = settings.get('DEFAULT_LANG', 'en').lower()
+        if not hasattr(self, 'lang'):
+            self.lang = default_lang
+
+        self.in_default_lang = (self.lang == default_lang)
+
+        if not hasattr(self, 'slug'):
+            self.slug = slugify(self.title)
+
+        if not hasattr(self, 'save_as'):
+            if self.in_default_lang:
+                self.save_as = '%s.html' % self.slug
+                clean_url = '%s/' % self.slug
+            else:
+                self.save_as = '%s-%s.html' % (self.slug, self.lang)
+                clean_url = '%s-%s/' % (self.slug, self.lang)
+
+        if settings.get('CLEAN_URLS', False):
+            self.url = clean_url
+        else:
+            self.url = self.save_as
+
         if filename:
             self.filename = filename
 
@@ -29,14 +53,6 @@ class Page(object):
         if not hasattr(self, prop):
             raise NameError(prop)
 
-    @property
-    def url(self):
-        return '%s.html' % self.slug
-
-    @property
-    def slug(self):
-        return slugify(self.title)
-
     @property
     def summary(self):
         return truncate_html_words(self.content, 50)
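The Page.__init__ changes above derive lang, slug, save_as and url from the page metadata plus the DEFAULT_LANG and CLEAN_URLS settings. As a minimal sketch outside the commit (page_paths is a hypothetical helper standing in for the attribute assignments), the naming scheme works out to:

    # Illustrative only: mirrors the save_as/url rules added to Page.__init__.
    def page_paths(slug, lang, default_lang='en', clean_urls=False):
        in_default_lang = (lang == default_lang)
        if in_default_lang:
            save_as = '%s.html' % slug              # e.g. 'my-post.html'
            clean_url = '%s/' % slug                # e.g. 'my-post/'
        else:
            save_as = '%s-%s.html' % (slug, lang)   # e.g. 'my-post-fr.html'
            clean_url = '%s-%s/' % (slug, lang)     # e.g. 'my-post-fr/'
        url = clean_url if clean_urls else save_as
        return save_as, url

    # page_paths('my-post', 'fr')                  -> ('my-post-fr.html', 'my-post-fr.html')
    # page_paths('my-post', 'en', clean_urls=True) -> ('my-post.html', 'my-post/')

So translated variants get a language suffix in both the file name and the URL, and CLEAN_URLS only changes what the url attribute points to, not where the file is written.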
pelican/generators.py
@@ -1,11 +1,13 @@
 from operator import attrgetter
+from itertools import chain
 from datetime import datetime
+from collections import defaultdict
 import os
 
 from jinja2 import Environment, FileSystemLoader
 from jinja2.exceptions import TemplateNotFound
 
-from pelican.utils import update_dict, copytree
+from pelican.utils import update_dict, copytree, process_translations
 from pelican.contents import Article, Page, is_valid_content
 from pelican.readers import read_file
 
@@ -75,7 +77,8 @@ class ArticlesGenerator(Generator):
 
     def __init__(self, *args, **kwargs):
         """initialize properties"""
-        self.articles = []
+        self.articles = []  # only articles in default language
+        self.translations = []
         self.dates = {}
         self.tags = {}
         self.categories = {}
@@ -103,10 +106,21 @@ class ArticlesGenerator(Generator):
         if 'TAG_FEED' in self.settings:
             for tag, arts in self.tags.items():
                 arts.sort(key=attrgetter('date'), reverse=True)
-                writer.write_feed(arts, self.context, self.settings['TAG_FEED'] % tag)
+                writer.write_feed(arts, self.context,
+                                  self.settings['TAG_FEED'] % tag)
 
                 if 'TAG_FEED_RSS' in self.settings:
-                    writer.write_feed(arts, self.context, self.settings['TAG_FEED_RSS'] % tag, feed_type='rss')
+                    writer.write_feed(arts, self.context,
+                                      self.settings['TAG_FEED_RSS'] % tag, feed_type='rss')
+
+        translations_feeds = defaultdict(list)
+        for article in self.translations:
+            translations_feeds[article.lang].append(article)
+
+        for lang, items in translations_feeds.items():
+            items.sort(key=attrgetter('date'), reverse=True)
+            writer.write_feed(items, self.context,
+                              self.settings['TRANSLATION_FEED'] % lang)
 
 
     def generate_pages(self, writer):
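The new block in generate_feeds buckets translated articles by language with a defaultdict and then writes one feed per language through the TRANSLATION_FEED pattern. A standalone sketch of that grouping idiom (FakeArticle is a made-up stand-in for the real Article objects):

    from collections import defaultdict
    from operator import attrgetter

    class FakeArticle(object):
        def __init__(self, lang, date):
            self.lang, self.date = lang, date

    translations = [FakeArticle('fr', 2), FakeArticle('de', 1), FakeArticle('fr', 1)]

    feeds = defaultdict(list)                 # lang -> list of articles
    for article in translations:
        feeds[article.lang].append(article)

    for lang, items in feeds.items():
        items.sort(key=attrgetter('date'), reverse=True)      # newest first
        print('feeds/all-%s.atom.xml would get %d items' % (lang, len(items)))

With the default TRANSLATION_FEED value, each language thus ends up with its own feed path, for example feeds/all-fr.atom.xml.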
@@ -124,8 +138,8 @@ class ArticlesGenerator(Generator):
         for cat in self.categories:
             write('category/%s.html' % cat, templates['category'], self.context,
                   category=cat, articles=self.categories[cat])
-        for article in self.articles:
-            write('%s' % article.url,
+        for article in chain(self.translations, self.articles):
+            write(article.save_as,
                   templates['article'], self.context, article=article,
                   category=article.category)
 
@@ -134,6 +148,7 @@ class ArticlesGenerator(Generator):
 
         # return the list of files to use
         files = self.get_files(self.path, exclude=['pages',])
+        all_articles = []
         for f in files:
             content, metadatas = read_file(f)
 
@@ -157,16 +172,23 @@ class ArticlesGenerator(Generator):
             if not is_valid_content(article, f):
                 continue
 
-            update_dict(self.categories, article.category, article)
             if hasattr(article, 'tags'):
                 for tag in article.tags:
                     update_dict(self.tags, tag, article)
-            self.articles.append(article)
+            all_articles.append(article)
+
+        self.articles, self.translations = process_translations(all_articles)
+
+        for article in self.articles:
+            # only main articles are listed in categories, not translations
+            update_dict(self.categories, article.category, article)
+
 
         # sort the articles by date
         self.articles.sort(key=attrgetter('date'), reverse=True)
         self.dates = list(self.articles)
-        self.dates.sort(key=attrgetter('date'), reverse=self.context['REVERSE_ARCHIVE_ORDER'])
+        self.dates.sort(key=attrgetter('date'),
+                        reverse=self.context['REVERSE_ARCHIVE_ORDER'])
         # and generate the output :)
         self._update_context(('articles', 'dates', 'tags', 'categories'))
 
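Taken together, these hunks change generate_context to collect every parsed article first, split the list with process_translations, and build the category and tag indexes from the default-language set only, while the write loop in generate_pages walks chain(self.translations, self.articles) so every variant is still rendered to its own save_as path. A condensed sketch of that flow (build_output, write_page and the plain categories dict are placeholders, not Pelican APIs):

    from itertools import chain

    def build_output(all_articles, process_translations, write_page):
        # Split into default-language articles and their translations.
        articles, translations = process_translations(all_articles)

        # Indexes (categories here) are built from the default-language set only...
        categories = {}
        for article in articles:
            categories.setdefault(article.category, []).append(article)

        # ...but every variant, translated or not, is written to its own file.
        for article in chain(translations, articles):
            write_page(article.save_as, article)
        return categories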
@@ -183,21 +205,24 @@ class PagesGenerator(Generator):
         super(PagesGenerator, self).__init__(*args, **kwargs)
 
     def generate_context(self):
+        all_pages = []
         for f in self.get_files(os.sep.join((self.path, 'pages'))):
             content, metadatas = read_file(f)
             page = Page(content, metadatas, settings=self.settings,
                         filename=f)
             if not is_valid_content(page, f):
                 continue
-            self.pages.append(page)
+            all_pages.append(page)
+
+        self.pages, self.translations = process_translations(all_pages)
 
         self._update_context(('pages', ))
         self.context['PAGES'] = self.pages
 
     def generate_output(self, writer):
         templates = self.get_templates()
-        for page in self.pages:
-            writer.write_file('pages/%s' % page.url, templates['page'],
+        for page in chain(self.translations, self.pages):
+            writer.write_file('pages/%s' % page.save_as, templates['page'],
                               self.context, page=page)
 
 
pelican/readers.py
@@ -50,11 +50,10 @@ class MarkdownReader(object):
 
         metadatas = {}
         for name, value in md.Meta.items():
-            if name in _METADATAS_FIELDS:
-                meta = _METADATAS_FIELDS[name](value[0])
-            else:
-                meta = value[0]
-            metadatas[name.lower()] = meta
+            name = name.lower()
+            metadatas[name] = _METADATAS_FIELDS.get(
+                name, lambda x:x
+            )(value[0])
         return content, metadatas
 
 _EXTENSIONS = {'rst': RstReader, 'md': MarkdownReader}  # supported formats
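The MarkdownReader change collapses the if/else lookup into a single dict.get call: fields with a registered converter are converted, everything else passes through an identity lambda, and the name is lowercased before the lookup. A quick equivalence check with a toy field table standing in for _METADATAS_FIELDS:

    # Toy stand-in for _METADATAS_FIELDS: field name -> converter.
    FIELDS = {'tags': lambda v: [t.strip() for t in v.split(',')]}

    def old_style(name, value):
        if name in FIELDS:
            meta = FIELDS[name](value)
        else:
            meta = value
        return name.lower(), meta

    def new_style(name, value):
        name = name.lower()
        return name, FIELDS.get(name, lambda x: x)(value)

    assert old_style('tags', 'a, b') == new_style('tags', 'a, b') == ('tags', ['a', 'b'])
    assert old_style('title', 'Hello') == new_style('title', 'Hello') == ('title', 'Hello')

The one behavioural nuance is that the new form lowercases the field name before the converter lookup, which only matters if a converter key were ever registered with uppercase letters.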
pelican/settings.py
@@ -10,6 +10,7 @@ _DEFAULT_CONFIG = {'PATH': None,
                    'THEME_STATIC_PATHS': ['static',],
                    'FEED': 'feeds/all.atom.xml',
                    'CATEGORY_FEED': 'feeds/%s.atom.xml',
+                   'TRANSLATION_FEED': 'feeds/all-%s.atom.xml',
                    'SITENAME': 'A Pelican Blog',
                    'DISPLAY_PAGES_ON_MENU': True,
                    'PDF_GENERATOR': False,
@@ -18,6 +19,7 @@ _DEFAULT_CONFIG = {'PATH': None,
                    'CSS_FILE': 'main.css',
                    'REVERSE_ARCHIVE_ORDER': False,
                    'KEEP_OUTPUT_DIRECTORY': False,
+                   'CLEAN_URLS': False,  # use /blah/ instead /blah.html in urls
                    }
 
 def read_settings(filename):
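These two defaults let a site opt into per-language feeds and extension-less URLs from its own settings module. A hypothetical settings file for a bilingual site, overriding only what this commit touches, might look like:

    # settings.py for a bilingual site (illustrative values only)
    SITENAME = 'A Pelican Blog'
    DEFAULT_LANG = 'en'

    # one Atom feed per translation language, e.g. feeds/all-fr.atom.xml
    TRANSLATION_FEED = 'feeds/all-%s.atom.xml'

    # link to /my-post/ instead of /my-post.html in generated pages
    CLEAN_URLS = True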
pelican/utils.py
@@ -4,6 +4,8 @@ import os
 import shutil
 from datetime import datetime
 from codecs import open as _open
+from itertools import groupby
+from operator import attrgetter
 
 def update_dict(mapping, key, value):
     """Update a dict intenal list
@@ -147,3 +149,38 @@ def truncate_html_words(s, num, end_text='...'):
     # Return string
     return out
 
+
+def process_translations(content_list):
+    """ Finds all translation and returns
+    tuple with two lists (index, translations).
+    Index list includes items in default language
+    or items which have no variant in default language.
+
+    Also, for each content_list item, it
+    sets attribute 'translations'
+    """
+    grouped_by_slugs = groupby(content_list, attrgetter('slug'))
+    index = []
+    translations = []
+
+    for slug, items in grouped_by_slugs:
+        items = list(items)
+        # find items with default language
+        default_lang_items = filter(
+            attrgetter('in_default_lang'),
+            items
+        )
+        len_ = len(default_lang_items)
+        if len_ > 1:
+            print u' [warning] there are %s variants of "%s"' % (len_, slug)
+        elif len_ == 0:
+            default_lang_items = items[:1]
+
+        index.extend(default_lang_items)
+        translations.extend(filter(
+            lambda x: x not in default_lang_items,
+            items
+        ))
+        for a in items:
+            a.translations = filter(lambda x: x != a, items)
+    return index, translations
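process_translations groups the content list by slug with itertools.groupby, keeps the default-language variant (or the first item when none exists) in the index list, moves everything else to translations, and sets a translations attribute on every item. Note that groupby only merges consecutive items, so the helper effectively assumes the input is ordered by slug. A small usage sketch with stub objects (the Stub class and sample data are illustrative; the import works once this commit is applied):

    from operator import attrgetter
    from pelican.utils import process_translations

    class Stub(object):
        def __init__(self, slug, lang, in_default_lang):
            self.slug, self.lang, self.in_default_lang = slug, lang, in_default_lang
        def __repr__(self):
            return '<%s/%s>' % (self.slug, self.lang)

    articles = [Stub('hello', 'en', True), Stub('hello', 'fr', False),
                Stub('other', 'fr', False)]

    # groupby only groups adjacent items, so sort by slug before calling the helper.
    articles.sort(key=attrgetter('slug'))
    index, translations = process_translations(articles)
    # index        -> [<hello/en>, <other/fr>]  (default language, or the only variant)
    # translations -> [<hello/fr>]
    # every item also gains a .translations list pointing at its sibling variants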