Refactoring, Again :)
Added some more notes to the documentation about how this works. I do think that the overall structure is clearer now, and easier to understand. After all, that's how it should always be ! --HG-- rename : pelican/processors.py => pelican/generators.py rename : pelican/generators.py => pelican/writers.py
This commit is contained in:
parent 3b7c546136
commit 836d4ea117
12 changed files with 474 additions and 345 deletions

@@ -177,6 +177,10 @@ Source code

You can access the source code via Mercurial at http://hg.notmyidea.org/pelican/
or via git at http://github.com/ametaireau/pelican/

If you feel hackish, have a look at the `pelican's internals explanations
<http://alexis.notmyidea.org/pelican/internals.html>`_.

Feedback !
----------

1 THANKS

@@ -8,3 +8,4 @@ bugs or giving ideas. Thanks to them !

- Jérome Renard
- Nicolas Martin
- David Kulak
- Arnaud Bos

39 bin/pelican

@@ -1,38 +1,3 @@
#!/usr/bin/env python
import argparse

from pelican.generators import Generator
from pelican.processors import (ArticlesProcessor, PagesProcessor,
                                StaticProcessor, PdfProcessor)

parser = argparse.ArgumentParser(description="""A tool to generate a
static blog, with restructured text input files.""")

parser.add_argument(dest='path',
    help='Path where to find the content files')
parser.add_argument('-t', '--theme-path', dest='theme',
    help='Path where to find the theme templates. If not specified, it will '
         'use the default one included with pelican.')
parser.add_argument('-o', '--output', dest='output',
    help='Where to output the generated files. If not specified, a directory'
         ' will be created, named "output" in the current path.')
parser.add_argument('-m', '--markup', default='rst, md', dest='markup',
    help='the markup language to use. Currently only reStructuredText is'
         ' available.')
parser.add_argument('-s', '--settings', dest='settings',
    help='the settings of the application. Defaults to None.')


if __name__ == '__main__':
    args = parser.parse_args()
    markup = [a.split()[0] for a in args.markup.split(',')]

    generator = Generator(args.settings, args.path, args.theme,
                          args.output, markup)

    processors = [ArticlesProcessor, PagesProcessor, StaticProcessor]
    if generator.settings['PDF_PROCESSOR']:
        processors.append(PdfProcessor)

    generator.run(processors)
    print "Enjoy !"
from pelican import main
main()

81 docs/internals.rst Normal file

@@ -0,0 +1,81 @@

Pelican internals
#################

This section describes how pelican works internally. As you'll see, it's
quite simple, but a bit of documentation doesn't hurt :)

Overall structure
=================

What `pelican` does is take a list of files and process them into some
sort of output. Usually, the files are reStructuredText and Markdown files,
and the output is a blog, but it can be anything you want.

I've separated the logic into different classes and concepts (a short sketch
of how they fit together follows this list):

* `writers` are responsible for the whole writing process of the
  files: writing .html files, RSS feeds and so on. Since those operations
  are commonly used, the object is created once, and then passed to the
  generators.

* `readers` are used to read from various formats (Markdown and reStructured
  Text for now, but the system is extensible). Given a file, they return
  metadata (author, tags, category, etc.) and content (HTML formatted).

* `generators` generate the different outputs. For instance, pelican comes with
  `ArticlesGenerator` and `PagesGenerator`, among others. Given
  a configuration, they can do whatever they want. Most of the time it's
  generating files from inputs.

* `pelican` also uses `templates`, so it's easy to write your own theme. The
  syntax is `jinja2`, and, trust me, really easy to learn, so don't hesitate
  a second.
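
To give a rough idea of how these pieces fit together, here is a simplified
sketch of the wiring (the `build` function below is only an illustration of
the flow, close to what pelican's entry point does; settings handling and
output cleanup are left out)::

    from pelican.writers import Writer
    from pelican.generators import ArticlesGenerator, PagesGenerator

    def build(settings, path, theme, output_path, markup):
        context = settings.copy()
        generators = [cls(context, settings, path, theme, output_path, markup)
                      for cls in (ArticlesGenerator, PagesGenerator)]
        writer = Writer(output_path)

        # first pass: each generator reads its input and fills the shared context
        for generator in generators:
            if hasattr(generator, 'generate_context'):
                generator.generate_context()

        # second pass: each generator writes its output through the writer
        for generator in generators:
            if hasattr(generator, 'generate_output'):
                generator.generate_output(writer)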

How to implement a new reader ?
===============================

There is an awesome markup language you want to add to pelican ?
Well, the only thing you have to do is to create a class with a `read`
method that returns the HTML content and some metadata.

Take a look at the Markdown reader::

    class MarkdownReader(object):

        def read(self, filename):
            """Parse content and metadata of markdown files"""
            text = open(filename).read()
            md = Markdown(extensions=['meta', 'codehilite'])
            content = md.convert(text)

            metadatas = {}
            for name, value in md.Meta.items():
                if name in _METADATAS_FIELDS:
                    meta = _METADATAS_FIELDS[name](value[0])
                else:
                    meta = value[0]
                metadatas[name.lower()] = meta
            return content, metadatas

Simple, isn't it ?
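
For a completely new format, a reader only has to honour the same contract.
As a rough sketch (the `TxtReader` below is a made-up example, not a reader
shipped with pelican), here is what a reader for plain text files could look
like::

    from cgi import escape
    from os.path import basename, splitext


    class TxtReader(object):
        """Hypothetical reader for plain .txt files."""

        def read(self, filename):
            """Return the HTML content and the metadata of a text file."""
            text = open(filename).read()
            content = '<pre>%s</pre>' % escape(text)

            # derive a title from the filename; everything else falls back
            # to pelican's defaults
            metadatas = {'title': splitext(basename(filename))[0]}
            return content, metadatas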

How to implement a new generator ?
==================================

Generators have basically two important methods (a minimal example is
sketched after this list). You're not forced to create both; only the ones
that exist will be called.

* `generate_context` is called first, for all the generators.
  Do whatever you have to do, and update the global context if needed. This
  context is shared between all generators, and will be passed to the
  templates. For instance, the `PagesGenerator` `generate_context` method finds
  all the pages, transforms them into objects, and populates the context with
  them. Be careful *not* to output anything using this context at this stage,
  as it is likely to be changed by the other generators.

* `generate_output` is then called. And guess what it is made for ? Oh,
  generating the output :) That's where you may want to look at the context
  and call the methods of the `writer` object, which is passed as the first
  argument of this function. In the `PagesGenerator` example, this method
  looks at all the pages recorded in the global context, and outputs a file on
  the disk (using the writer method `write_file`) for each page encountered.
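
As a minimal sketch of a custom generator (the `NotesGenerator` below is a
made-up example, closely modelled on the `PagesGenerator` shipped with
pelican: it renders every file found in a `notes/` folder with the existing
`page` template)::

    import os

    from pelican.contents import Page, is_valid_content
    from pelican.generators import Generator
    from pelican.readers import read_file


    class NotesGenerator(Generator):
        """Hypothetical generator: render every file in "notes/" as a page."""

        def __init__(self, *args, **kwargs):
            self.notes = []
            super(NotesGenerator, self).__init__(*args, **kwargs)

        def generate_context(self):
            # first pass: read the files and share them with the templates
            # (and the other generators) through the context
            for f in self.get_files(os.sep.join((self.path, 'notes'))):
                content, metadatas = read_file(f)
                note = Page(content, metadatas, settings=self.settings,
                            filename=f)
                if is_valid_content(note, f):
                    self.notes.append(note)
            self._update_context(('notes',))

        def generate_output(self, writer):
            # second pass: write one file per note, through the shared writer
            templates = self.get_templates()
            for note in self.notes:
                writer.write_file('notes/%s' % note.url, templates['page'],
                                  self.context, page=note)

To use it, you would also add it to the list of generator classes that
pelican instantiates in its `run_pelican` function (shown later in this
commit).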

pelican/__init__.py

@@ -0,0 +1,104 @@
import argparse
import os

from pelican.settings import read_settings
from pelican.utils import clean_output_dir
from pelican.writers import Writer
from pelican.generators import (ArticlesGenerator, PagesGenerator,
                                StaticGenerator, PdfGenerator)


def init_params(settings=None, path=None, theme=None, output_path=None,
                markup=None):
    """Read the settings, and perform some checks on the environment
    before doing anything else.
    """
    if settings is None:
        settings = {}
    settings = read_settings(settings)
    path = path or settings['PATH']
    if path.endswith('/'):
        path = path[:-1]

    # define the default settings
    theme = theme or settings['THEME']
    output_path = output_path or settings['OUTPUT_PATH']
    output_path = os.path.realpath(output_path)
    markup = markup or settings['MARKUP']

    # find the theme in pelican/themes if the given one does not exist
    if not os.path.exists(theme):
        theme_path = os.sep.join([os.path.dirname(
            os.path.abspath(__file__)), "themes/%s" % theme])
        if os.path.exists(theme_path):
            theme = theme_path
        else:
            raise Exception("Impossible to find the theme %s" % theme)

    if 'SITEURL' not in settings:
        settings['SITEURL'] = output_path

    # get the list of files to parse
    if not path:
        raise Exception('you need to specify a path to search the docs on !')

    return settings, path, theme, output_path, markup


def run_generators(generators, settings, path, theme, output_path, markup):
    """Run the generators and return"""

    context = settings.copy()
    generators = [p(context, settings, path, theme, output_path, markup)
                  for p in generators]

    writer = Writer(output_path)

    for p in generators:
        if hasattr(p, 'generate_context'):
            p.generate_context()

    # erase the directory if it is not the source
    if output_path not in os.path.realpath(path):
        clean_output_dir(output_path)

    for p in generators:
        if hasattr(p, 'generate_output'):
            p.generate_output(writer)


def run_pelican(settings, path, theme, output_path, markup):
    """Run pelican with the given parameters"""

    params = init_params(settings, path, theme, output_path, markup)
    generators = [ArticlesGenerator, PagesGenerator, StaticGenerator]
    if params[0]['PDF_GENERATOR']:  # params[0] is the settings dict
        generators.append(PdfGenerator)
    run_generators(generators, *params)


def main():
    parser = argparse.ArgumentParser(description="""A tool to generate a
    static blog, with restructured text input files.""")

    parser.add_argument(dest='path',
        help='Path where to find the content files')
    parser.add_argument('-t', '--theme-path', dest='theme',
        help='Path where to find the theme templates. If not specified, it will '
             'use the default one included with pelican.')
    parser.add_argument('-o', '--output', dest='output',
        help='Where to output the generated files. If not specified, a directory'
             ' will be created, named "output" in the current path.')
    parser.add_argument('-m', '--markup', default='rst, md', dest='markup',
        help='the markup language to use. Currently only reStructuredText is'
             ' available.')
    parser.add_argument('-s', '--settings', dest='settings',
        help='the settings of the application. Defaults to None.')
    args = parser.parse_args()
    markup = [a.split()[0] for a in args.markup.split(',')]

    run_pelican(args.settings, args.path, args.theme, args.output, markup)


if __name__ == '__main__':
    main()

pelican/contents.py

@@ -2,7 +2,7 @@ from pelican.utils import slugify, truncate_html_words


class Page(object):
    """Represents a page..
    """Represents a page
    Given a content, and metadatas, create an adequate object.

    :param string: the string to parse, containing the original content.

pelican/generators.py

@@ -1,137 +1,29 @@
# -*- coding: utf-8 -*-
from operator import attrgetter
from datetime import datetime
import os
from codecs import open

from jinja2 import Environment, FileSystemLoader
from jinja2.exceptions import TemplateNotFound
from feedgenerator import Atom1Feed, Rss201rev2Feed

from pelican.settings import read_settings
from pelican.utils import clean_output_dir
from pelican.utils import update_dict, copytree
from pelican.contents import Article, Page, is_valid_content
from pelican.readers import read_file

_TEMPLATES = ('index', 'tag', 'tags', 'article', 'category', 'categories',
              'archives', 'page')
_DIRECT_TEMPLATES = ('index', 'tags', 'categories', 'archives')


class Generator(object):
    """Handle all generation process: files writes, feed creation, and this
    kind of basic stuff"""

    def __init__(self, settings=None, path=None, theme=None, output_path=None,
                 markup=None):
        """Read the settings, and perform some checks on the environment
        before doing anything else.
        """
        if settings is None:
            settings = {}
        self.settings = read_settings(settings)
        self.path = path or self.settings['PATH']
        if self.path.endswith('/'):
            self.path = self.path[:-1]
    """Baseclass generator"""

        self.theme = theme or self.settings['THEME']
        output_path = output_path or self.settings['OUTPUT_PATH']
        self.output_path = os.path.realpath(output_path)
        self.markup = markup or self.settings['MARKUP']
    def __init__(self, *args, **kwargs):
        for idx, item in enumerate(('context', 'settings', 'path', 'theme',
                                    'output_path', 'markup')):
            setattr(self, item, args[idx])

        if not os.path.exists(self.theme):
            theme_path = os.sep.join([os.path.dirname(
                os.path.abspath(__file__)), "themes/%s" % self.theme])
            if os.path.exists(theme_path):
                self.theme = theme_path
            else:
                raise Exception("Impossible to find the theme %s" % self.theme)

        if 'SITEURL' not in self.settings:
            self.settings['SITEURL'] = self.output_path

        # get the list of files to parse
        if not path:
            raise Exception('you need to specify a path to search the docs on !')

    def run(self, processors):
        """Get the context from each processor, and then process them"""
        context = self.settings.copy()
        processors = [p() for p in processors]

        for p in processors:
            if hasattr(p, 'preprocess'):
                p.preprocess(context, self)

        if self.output_path not in os.path.realpath(self.path):
            clean_output_dir(self.output_path)

        for p in processors:
            p.process(context, self)

    def generate_feed(self, elements, context, filename=None,
                      feed_type='atom'):
        """Generate a feed with the list of articles provided

        Return the feed. If no output_path or filename is specified, just return
        the feed object.

        :param articles: the articles to put on the feed.
        :param context: the context to get the feed metadata.
        :param output_path: where to output the file.
        :param filename: the filename to output.
        :param feed_type: the feed type to use (atom or rss)
        """
        site_url = context.get('SITEURL', self._get_relative_siteurl(filename))

        feed_class = Rss201rev2Feed if feed_type == 'rss' else Atom1Feed

        feed = feed_class(
            title=context['SITENAME'],
            link=site_url,
            feed_url="%s/%s" % (site_url, filename),
            description=context.get('SITESUBTITLE', ''))
        for element in elements:
            feed.add_item(
                title=element.title,
                link="%s/%s" % (site_url, element.url),
                description=element.content,
                categories=element.tags if hasattr(element, "tags") else None,
                author_name=getattr(element, 'author', 'John Doe'),
                pubdate=element.date)

        if filename:
            complete_path = os.path.join(self.output_path, filename)
            try:
                os.makedirs(os.path.dirname(complete_path))
            except Exception:
                pass
            fp = open(complete_path, 'w')
            feed.write(fp, 'utf-8')
            print u' [ok] writing %s' % complete_path

            fp.close()
        return feed

    def generate_file(self, name, template, context, relative_urls=True,
                      **kwargs):
        """Write the file with the given information

        :param name: name of the file to output
        :param template: template to use to generate the content
        :param context: dict to pass to the templates.
        :param relative_urls: use relative urls or absolute ones
        :param **kwargs: additional variables to pass to the templates
        """
        context = context.copy()
        if relative_urls:
            context['SITEURL'] = self._get_relative_siteurl(name)

        context.update(kwargs)
        output = template.render(context)
        filename = os.sep.join((self.output_path, name))
        try:
            os.makedirs(os.path.dirname(filename))
        except Exception:
            pass
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(output)
        print u' [ok] writing %s' % filename
        for arg, value in kwargs.items():
            setattr(self, arg, value)

    def get_templates(self):
        """Return the templates to use.

@@ -166,7 +58,181 @@ class Generator(object):
        files.extend([os.sep.join((root, f)) for f in temp_files
                      if True in [f.endswith(ext) for ext in extensions]])
        return files

    def _get_relative_siteurl(self, filename):
        """Return the siteurl relative to the given filename"""
        return '../' * filename.count('/') + '.'

    def _update_context(self, items):
        """Update the context with the given items from the current
        generator.
        """
        for item in items:
            value = getattr(self, item)
            if hasattr(value, 'items'):
                value = value.items()
            self.context[item] = value


class ArticlesGenerator(Generator):
    """Generate blog articles"""

    def __init__(self, *args, **kwargs):
        """initialize properties"""
        self.articles = []
        self.dates = {}
        self.tags = {}
        self.categories = {}
        super(ArticlesGenerator, self).__init__(*args, **kwargs)

    def generate_feeds(self, writer):
        """Generate the feeds from the current context, and output files."""

        writer.write_feed(self.articles, self.context, self.settings['FEED'])

        if 'FEED_RSS' in self.settings:
            writer.write_feed(self.articles, self.context,
                              self.settings['FEED_RSS'], feed_type='rss')

        for cat, arts in self.categories.items():
            arts.sort(key=attrgetter('date'), reverse=True)
            writer.write_feed(arts, self.context,
                              self.settings['CATEGORY_FEED'] % cat)

            if 'CATEGORY_FEED_RSS' in self.settings:
                writer.write_feed(arts, self.context,
                                  self.settings['CATEGORY_FEED_RSS'] % cat,
                                  feed_type='rss')

    def generate_pages(self, writer):
        """Generate the pages on the disk
        TODO: change the name"""

        templates = self.get_templates()
        write = writer.write_file
        for template in _DIRECT_TEMPLATES:
            write('%s.html' % template, templates[template], self.context,
                  blog=True)
        for tag in self.tags:
            write('tag/%s.html' % tag, templates['tag'], self.context, tag=tag)
        for cat in self.categories:
            write('category/%s.html' % cat, templates['category'], self.context,
                  category=cat, articles=self.categories[cat])
        for article in self.articles:
            write('%s' % article.url,
                  templates['article'], self.context, article=article,
                  category=article.category)

    def generate_context(self):
        """change the context"""

        # return the list of files to use
        files = self.get_files(self.path, exclude=['pages',])
        for f in files:
            content, metadatas = read_file(f)

            # if no category is set, use the name of the path as a category
            if 'category' not in metadatas.keys():
                category = os.path.dirname(f).replace(
                    os.path.expanduser(self.path)+'/', '')

                if category == self.path:
                    category = self.settings['DEFAULT_CATEGORY']

                if category != '':
                    metadatas['category'] = unicode(category)

            if 'date' not in metadatas.keys()\
                    and self.settings['FALLBACK_ON_FS_DATE']:
                metadatas['date'] = datetime.fromtimestamp(os.stat(f).st_ctime)

            article = Article(content, metadatas, settings=self.settings,
                              filename=f)
            if not is_valid_content(article, f):
                continue

            update_dict(self.categories, article.category, article)
            if hasattr(article, 'tags'):
                for tag in article.tags:
                    update_dict(self.tags, tag, article)
            self.articles.append(article)

        # sort the articles by date
        self.articles.sort(key=attrgetter('date'), reverse=True)
        self.dates = list(self.articles)
        self.dates.sort(key=attrgetter('date'))
        # and generate the output :)
        self._update_context(('articles', 'dates', 'tags', 'categories'))

    def generate_output(self, writer):
        self.generate_feeds(writer)
        self.generate_pages(writer)


class PagesGenerator(Generator):
    """Generate pages"""

    def __init__(self, *args, **kwargs):
        self.pages = []
        super(PagesGenerator, self).__init__(*args, **kwargs)

    def generate_context(self):
        for f in self.get_files(os.sep.join((self.path, 'pages'))):
            content, metadatas = read_file(f)
            page = Page(content, metadatas, settings=self.settings,
                        filename=f)
            if not is_valid_content(page, f):
                continue
            self.pages.append(page)

        self._update_context(('pages', ))

    def generate_output(self, writer):
        templates = self.get_templates()
        for page in self.pages:
            writer.write_file('pages/%s' % page.url, templates['page'],
                              self.context, page=page)
        self._update_context(('pages',))


class StaticGenerator(Generator):
    """copy static paths to output"""

    def _copy_paths(self, paths, source, destination, output_path,
                    final_path=None):
        for path in paths:
            copytree(path, source, os.path.join(output_path, destination),
                     final_path)

    def generate_output(self, writer):
        self._copy_paths(self.settings['STATIC_PATHS'], self.path,
                         'static', self.output_path)
        self._copy_paths(self.settings['THEME_PATHS'], self.theme,
                         'theme', self.output_path, '.')


class PdfGenerator(Generator):
    """Generate PDFs on the output dir, for all articles and pages coming from
    rst"""
    def __init__(self, *args, **kwargs):
        try:
            from rst2pdf.createpdf import RstToPdf
            self.pdfcreator = RstToPdf(breakside=0, stylesheets=['twelvepoint'])
        except ImportError:
            raise Exception("unable to find rst2pdf")
        super(PdfGenerator, self).__init__(*args, **kwargs)

    def _create_pdf(self, obj, output_path):
        if obj.filename.endswith(".rst"):
            self.pdfcreator.createPdf(text=open(obj.filename).read(),
                output=os.path.join(output_path, "%s.pdf" % obj.slug))

    def generate_context(self):
        pdf_path = os.path.join(self.output_path, 'pdf')
        try:
            os.mkdir(pdf_path)
        except OSError:
            pass

        for article in self.context['articles']:
            self._create_pdf(article, pdf_path)

        for page in self.context['pages']:
            self._create_pdf(page, pdf_path)

pelican/processors.py

@@ -1,179 +0,0 @@
from operator import attrgetter
from datetime import datetime
import os

from pelican.utils import update_dict, copytree
from pelican.contents import Article, Page, is_valid_content
from pelican.readers import read_file

_DIRECT_TEMPLATES = ('index', 'tags', 'categories', 'archives')


class Processor(object):

    def _update_context(self, context, items):
        """Update the context with the given items from the current
        processor.
        """
        for item in items:
            value = getattr(self, item)
            if hasattr(value, 'items'):
                value = value.items()
            context[item] = value


class ArticlesProcessor(Processor):

    def __init__(self, settings=None):
        self.articles = []
        self.dates = {}
        self.tags = {}
        self.categories = {}

    def generate_feeds(self, context, generator):
        """Generate the feeds from the current context, and output files."""

        generator.generate_feed(self.articles, context, context['FEED'])

        if 'FEED_RSS' in context:
            generator.generate_feed(self.articles, context,
                                    context['FEED_RSS'], feed_type='rss')

        for cat, arts in self.categories.items():
            arts.sort(key=attrgetter('date'), reverse=True)
            generator.generate_feed(arts, context,
                                    context['CATEGORY_FEED'] % cat)

            if 'CATEGORY_FEED_RSS' in context:
                generator.generate_feed(arts, context,
                                        context['CATEGORY_FEED_RSS'] % cat,
                                        feed_type='rss')

    def generate_pages(self, context, generator):
        """Generate the pages on the disk"""

        templates = generator.get_templates()
        generate = generator.generate_file
        for template in _DIRECT_TEMPLATES:
            generate('%s.html' % template, templates[template], context, blog=True)
        for tag in self.tags:
            generate('tag/%s.html' % tag, templates['tag'], context, tag=tag)
        for cat in self.categories:
            generate('category/%s.html' % cat, templates['category'], context,
                     category=cat, articles=self.categories[cat])
        for article in self.articles:
            generate('%s' % article.url,
                     templates['article'], context, article=article,
                     category=article.category)

    def preprocess(self, context, generator):

        # build the list of articles / categories / etc.
        files = generator.get_files(generator.path, exclude=['pages',])
        for f in files:
            content, metadatas = read_file(f)
            if 'category' not in metadatas.keys():
                category = os.path.dirname(f).replace(
                    os.path.expanduser(generator.path)+'/', '')

                if category == generator.path:
                    category = context['DEFAULT_CATEGORY']

                if category != '':
                    metadatas['category'] = unicode(category)

            if 'date' not in metadatas.keys() and context['FALLBACK_ON_FS_DATE']:
                metadatas['date'] = datetime.fromtimestamp(os.stat(f).st_ctime)

            article = Article(content, metadatas, settings=generator.settings,
                              filename=f)
            if not is_valid_content(article, f):
                continue

            update_dict(self.categories, article.category, article)
            if hasattr(article, 'tags'):
                for tag in article.tags:
                    update_dict(self.tags, tag, article)
            self.articles.append(article)

        # sort the articles by date
        self.articles.sort(key=attrgetter('date'), reverse=True)
        self.dates = list(self.articles)
        self.dates.sort(key=attrgetter('date'))
        # and generate the output :)
        self._update_context(context, ('articles', 'dates', 'tags', 'categories'))

    def process(self, context, generator):
        self.generate_feeds(context, generator)
        self.generate_pages(context, generator)


class PagesProcessor(Processor):
    """Generate pages"""

    def __init__(self):
        self.pages = []

    def preprocess(self, context, generator):
        for f in generator.get_files(os.sep.join((generator.path, 'pages'))):
            content, metadatas = read_file(f)
            page = Page(content, metadatas, settings=generator.settings,
                        filename=f)
            if not is_valid_content(page, f):
                continue
            self.pages.append(page)

        context['PAGES'] = self.pages

    def process(self, context, generator):
        templates = generator.get_templates()
        for page in self.pages:
            generator.generate_file('pages/%s' % page.url,
                                    templates['page'], context, page=page)
        self._update_context(context, ('pages',))


class StaticProcessor(Processor):
    """copy static paths to output"""

    def _copy_paths(self, paths, source, destination, output_path,
                    final_path=None):
        for path in paths:
            copytree(path, source, os.path.join(output_path, destination),
                     final_path)

    def process(self, context, generator):
        self._copy_paths(generator.settings['STATIC_PATHS'], generator.path,
                         'static', generator.output_path)
        self._copy_paths(generator.settings['THEME_PATHS'], generator.theme,
                         'theme', generator.output_path, '.')


class PdfProcessor(Processor):
    """Generate PDFs on the output dir, for all articles and pages coming from
    rst"""
    def __init__(self):
        try:
            from rst2pdf.createpdf import RstToPdf
            self.pdfcreator = RstToPdf(breakside=0, stylesheets=['twelvepoint'])
        except ImportError:
            raise Exception("unable to find rst2pdf")

    def _create_pdf(self, obj, output_path):
        if obj.filename.endswith(".rst"):
            self.pdfcreator.createPdf(text=open(obj.filename).read(),
                output=os.path.join(output_path, "%s.pdf" % obj.slug))

    def process(self, context, generator):
        pdf_path = os.path.join(generator.output_path, 'pdf')
        try:
            os.mkdir(pdf_path)
        except OSError:
            pass

        for article in context['articles']:
            self._create_pdf(article, pdf_path)

        for page in context['pages']:
            self._create_pdf(page, pdf_path)

pelican/settings.py

@@ -12,7 +12,7 @@ _DEFAULT_CONFIG = {'PATH': None,
                   'CATEGORY_FEED': 'feeds/%s.atom.xml',
                   'SITENAME': 'A Pelican Blog',
                   'DISPLAY_PAGES_ON_MENU': True,
                   'PDF_PROCESSOR': False,
                   'PDF_GENERATOR': False,
                   'DEFAULT_CATEGORY': 'misc',
                   'FALLBACK_ON_FS_DATE': True,
                   'CSS_FILE': 'main.css',

@@ -1,6 +1,6 @@
{% extends "base.html" %}
{% block content %}
{% block content_title %}{% endblock %}
{% block content %}
{% if articles %}
{% for article in articles %}
{% if loop.index == 1 %}

pelican/utils.py

@@ -64,6 +64,7 @@ def copytree(path, origin, destination, topath=None):
    except OSError:
        pass


def clean_output_dir(path):
    """Remove all the files from the output directory"""

@@ -73,6 +74,12 @@ def clean_output_dir(path):
    except Exception as e:
        pass


def get_relative_path(filename):
    """Return the relative path to the given filename"""
    return '../' * filename.count('/') + '.'


def truncate_html_words(s, num, end_text='...'):
    """Truncates HTML to a certain number of words (not counting tags and
    comments). Closes opened tags if they were correctly closed in the given

80 pelican/writers.py Normal file

@@ -0,0 +1,80 @@
# -*- coding: utf-8 -*-
import os
from codecs import open

from feedgenerator import Atom1Feed, Rss201rev2Feed

from pelican.utils import get_relative_path


class Writer(object):

    def __init__(self, output_path):
        self.output_path = output_path

    def write_feed(self, elements, context, filename=None, feed_type='atom'):
        """Generate a feed with the list of articles provided

        Return the feed. If no output_path or filename is specified, just return
        the feed object.

        :param articles: the articles to put on the feed.
        :param context: the context to get the feed metadata.
        :param output_path: where to output the file.
        :param filename: the filename to output.
        :param feed_type: the feed type to use (atom or rss)
        """
        site_url = context.get('SITEURL', get_relative_path(filename))

        feed_class = Rss201rev2Feed if feed_type == 'rss' else Atom1Feed

        feed = feed_class(
            title=context['SITENAME'],
            link=site_url,
            feed_url="%s/%s" % (site_url, filename),
            description=context.get('SITESUBTITLE', ''))
        for element in elements:
            feed.add_item(
                title=element.title,
                link="%s/%s" % (site_url, element.url),
                description=element.content,
                categories=element.tags if hasattr(element, "tags") else None,
                author_name=getattr(element, 'author', 'John Doe'),
                pubdate=element.date)

        if filename:
            complete_path = os.path.join(self.output_path, filename)
            try:
                os.makedirs(os.path.dirname(complete_path))
            except Exception:
                pass
            fp = open(complete_path, 'w')
            feed.write(fp, 'utf-8')
            print u' [ok] writing %s' % complete_path

            fp.close()
        return feed

    def write_file(self, name, template, context, relative_urls=True,
                   **kwargs):
        """Render the template and write the file.

        :param name: name of the file to output
        :param template: template to use to generate the content
        :param context: dict to pass to the templates.
        :param relative_urls: use relative urls or absolute ones
        :param **kwargs: additional variables to pass to the templates
        """
        localcontext = context.copy()
        if relative_urls:
            localcontext['SITEURL'] = get_relative_path(name)

        localcontext.update(kwargs)
        output = template.render(localcontext)
        filename = os.sep.join((self.output_path, name))
        try:
            os.makedirs(os.path.dirname(filename))
        except Exception:
            pass
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(output)
        print u' [ok] writing %s' % filename