diff --git a/.gitignore b/.gitignore
index 4029b327..9274ba2d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,7 @@
 .*.swp
 .*.swo
 *.pyc
+.DS_Store
 docs/_build
 docs/fr/_build
 build
diff --git a/docs/fr/configuration.rst b/docs/fr/configuration.rst
index 695a0b0e..fadaf258 100644
--- a/docs/fr/configuration.rst
+++ b/docs/fr/configuration.rst
@@ -98,6 +98,9 @@ GITHUB_URL :
 GOOGLE_ANALYTICS :
     'UA-XXXX-YYYY' pour activer Google analytics ;
+
+GOSQUARED_SITENAME :
+    'XXX-YYYYYY-X' pour activer GoSquared ;
 
 JINJA_EXTENSIONS :
     Liste d'extension Jinja2 que vous souhaitez utiliser ;
diff --git a/docs/settings.rst b/docs/settings.rst
index b36c9953..65561d5d 100644
--- a/docs/settings.rst
+++ b/docs/settings.rst
@@ -91,6 +91,12 @@ Setting name (default value)                                          What doe
                                                                       index pages for collections of content e.g. tags and
                                                                       category index pages.
 `PAGINATED_DIRECT_TEMPLATES` (``('index',)``)                         Provides the direct templates that should be paginated.
+`SUMMARY_MAX_LENGTH` (``50``)                                         When creating a short summary of an article, this will
+                                                                      be the default length in words of the text created.
+                                                                      This only applies if your content does not otherwise
+                                                                      specify a summary. Setting to None will cause the summary
+                                                                      to be a copy of the original content.
+
 ===================================================================== =====================================================================

 .. [#] Default is the system locale.
@@ -398,6 +404,7 @@ Setting name                                                          What does it do ?
 `GITHUB_URL`                                                          Your GitHub URL (if you have one). It will then use this
                                                                       information to create a GitHub ribbon.
 `GOOGLE_ANALYTICS`                                                    'UA-XXXX-YYYY' to activate Google Analytics.
+`GOSQUARED_SITENAME`                                                  'XXX-YYYYYY-X' to activate GoSquared.
 `MENUITEMS`                                                           A list of tuples (Title, URL) for additional menu items
                                                                       to appear at the beginning of the main menu.
 `PIWIK_URL`                                                           URL to your Piwik server - without 'http://' at the
diff --git a/pelican/readers.py b/pelican/readers.py
index 83cb7e3b..83565918 100644
--- a/pelican/readers.py
+++ b/pelican/readers.py
@@ -13,11 +13,8 @@ try:
     from markdown import Markdown
 except ImportError:
     Markdown = False  # NOQA
-import cgi
-from HTMLParser import HTMLParser
 import re
-
 from pelican.contents import Category, Tag, Author
 from pelican.utils import get_date, open
@@ -129,12 +126,13 @@ class MarkdownReader(Reader):
             metadata[name] = self.process_metadata(name, value[0])
         return content, metadata

-"""
+
 class HtmlReader(Reader):
     file_extensions = ['html', 'htm']
     _re = re.compile('\<\!\-\-\#\s?[A-z0-9_-]*\s?\:s?[A-z0-9\s_-]*\s?\-\-\>')

     def read(self, filename):
+        """Parse content and metadata of (x)HTML files"""
         with open(filename) as content:
             metadata = {'title': 'unnamed'}
             for i in self._re.findall(content):
@@ -144,101 +142,6 @@ class HtmlReader(Reader):
                 metadata[name] = self.process_metadata(name, value)

             return content, metadata
-"""
-
-class PelicanHTMLParser(HTMLParser):
-    def __init__(self, settings):
-        HTMLParser.__init__(self)
-        self.body = ''
-        self.metadata = {}
-        self.settings = settings
-
-        self._data_buffer = ''
-
-        self._in_top_level = True
-        self._in_head = False
-        self._in_title = False
-        self._in_body = False
-        self._in_tags = False
-
-    def handle_starttag(self, tag, attrs):
-        if tag == 'head' and self._in_top_level:
-            self._in_top_level = False
-            self._in_head = True
-        elif tag == 'title' and self._in_head:
-            self._in_title = True
-            self._data_buffer = ''
-        elif tag == 'body' and self._in_top_level:
-            self._in_top_level = False
-            self._in_body = True
-            self._data_buffer = ''
-        elif tag == 'meta' and self._in_head:
-            self._handle_meta_tag(attrs)
-
-        elif self._in_body:
-            self._data_buffer += self.build_tag(tag, attrs, False)
-
-    def handle_endtag(self, tag):
-        if tag == 'head':
-            if self._in_head:
-                self._in_head = False
-                self._in_top_level = True
-        elif tag == 'title':
-            self._in_title = False
-            self.metadata['title'] = self._data_buffer
-        elif tag == 'body':
-            self.body = self._data_buffer
-            self._in_body = False
-            self._in_top_level = True
-        elif self._in_body:
-            self._data_buffer += '</{}>'.format(cgi.escape(tag))
-
-    def handle_startendtag(self, tag, attrs):
-        if tag == 'meta' and self._in_head:
-            self._handle_meta_tag(attrs)
-        if self._in_body:
-            self._data_buffer += self.build_tag(tag, attrs, True)
-
-    def handle_comment(self, data):
-        if self._in_body and data.strip() == 'PELICAN_END_SUMMARY':
-            self.metadata['summary'] = self._data_buffer
-
-    def handle_data(self, data):
-        self._data_buffer += data
-
-    def build_tag(self, tag, attrs, close_tag):
-        result = '<{}'.format(cgi.escape(tag))
-        result += ''.join((' {}="{}"'.format(cgi.escape(k), cgi.escape(v)) for k,v in attrs))
-        if close_tag:
-            return result + ' />'
-        return result + '>'
-
-    def _handle_meta_tag(self, attrs):
-        name = self._attr_value(attrs, 'name')
-        contents = self._attr_value(attrs, 'contents', '')
-        if name == 'keywords':
-            if contents:
-                self.metadata['tags'] = [Tag(unicode(tag), self.settings) for tag in contents.split(',')]
-        elif name == 'date':
-            self.metadata['date'] = get_date(contents)
-        else:
-            self.metadata[name] = contents
-
-    @classmethod
-    def _attr_value(cls, attrs, name, default=None):
-        return next((x[1] for x in attrs if x[0] == name), default)
-
-class HTMLReader(Reader):
-    file_extensions = ['htm', 'html']
-    enabled = True
-
-    def read(self, filename):
-        """Parse content and metadata of markdown files"""
-        with open(filename) as content:
-            parser = PelicanHTMLParser(self.settings)
-            parser.feed(content)
-            parser.close()
-            return parser.body, parser.metadata


 _EXTENSIONS = {}
diff --git a/pelican/themes/notmyidea/static/css/main.css b/pelican/themes/notmyidea/static/css/main.css
index 92905076..dce9e247 100644
--- a/pelican/themes/notmyidea/static/css/main.css
+++ b/pelican/themes/notmyidea/static/css/main.css
@@ -312,6 +312,7 @@ img.left, figure.left {float: right; margin: 0 0 2em 2em;}
 .social a[type$='atom+xml'], .social a[type$='rss+xml'] {background-image: url('../images/icons/rss.png');}
 .social a[href*='twitter.com'] {background-image: url('../images/icons/twitter.png');}
 .social a[href*='linkedin.com'] {background-image: url('../images/icons/linkedin.png');}
+.social a[href*='gitorious.org'] {background-image: url('../images/icons/gitorious.png');}

 /*
 	About
diff --git a/pelican/themes/notmyidea/static/images/icons/gitorious.png b/pelican/themes/notmyidea/static/images/icons/gitorious.png
new file mode 100644
index 00000000..6485f5ec
Binary files /dev/null and b/pelican/themes/notmyidea/static/images/icons/gitorious.png differ
diff --git a/pelican/themes/notmyidea/templates/index.html b/pelican/themes/notmyidea/templates/index.html
index de607152..8752a6b6 100644
--- a/pelican/themes/notmyidea/templates/index.html
+++ b/pelican/themes/notmyidea/templates/index.html
@@ -41,12 +41,12 @@
                 {% endif %}
-                {% if loop.last and (articles_page.has_previous()
-                    or not articles_page.has_previous() and loop.length > 1) %}
-                    {% include 'pagination.html' %}
-                {% endif %}
                 {% if loop.last %}
+                    {% if loop.last and (articles_page.has_previous()
+                        or not articles_page.has_previous() and loop.length > 1) %}
+                        {% include 'pagination.html' %}
+                    {% endif %}
                 {% endif %}
             {% endfor %}
diff --git a/pelican/themes/simple/templates/gosquared.html b/pelican/themes/simple/templates/gosquared.html
new file mode 100644
index 00000000..f47efcf4
--- /dev/null
+++ b/pelican/themes/simple/templates/gosquared.html
@@ -0,0 +1,14 @@
+{% if GOSQUARED_SITENAME %}
+
+{% endif %}
diff --git a/pelican/utils.py b/pelican/utils.py
index 99c938f5..0940bf72 100644
--- a/pelican/utils.py
+++ b/pelican/utils.py
@@ -1,5 +1,4 @@
 # -*- coding: utf-8 -*-
-import contextlib
 import os
 import re
 import pytz
@@ -34,10 +33,10 @@ def get_date(string):
         pass
     raise ValueError("'%s' is not a valid date" % string)

-@contextlib.contextmanager
+
 def open(filename):
     """Open a file and return it's content"""
-    yield _open(filename, encoding='utf-8').read()
+    return _open(filename, encoding='utf-8').read()


 def slugify(value):
diff --git a/tests/test_contents.py b/tests/test_contents.py
index c6ef29a8..e7c9ad01 100644
--- a/tests/test_contents.py
+++ b/tests/test_contents.py
@@ -4,6 +4,7 @@ from .support import unittest

 from pelican.contents import Page
 from pelican.settings import _DEFAULT_CONFIG
+from pelican.utils import truncate_html_words

 from jinja2.utils import generate_lorem_ipsum

@@ -48,6 +49,20 @@ class TestPage(unittest.TestCase):
         page = Page(**self.page_kwargs)
         self.assertEqual(page.summary, TEST_SUMMARY)

+    def test_summary_max_length(self):
+        """If a :SUMMARY_MAX_LENGTH: is set, and there is no other summary, generated summary
+        should not exceed the given length."""
+        page_kwargs = self._copy_page_kwargs()
+        settings = _DEFAULT_CONFIG.copy()
+        page_kwargs['settings'] = settings
+        del page_kwargs['metadata']['summary']
+        settings['SUMMARY_MAX_LENGTH'] = None
+        page = Page(**page_kwargs)
+        self.assertEqual(page.summary, TEST_CONTENT)
+        settings['SUMMARY_MAX_LENGTH'] = 10
+        page = Page(**page_kwargs)
+        self.assertEqual(page.summary, truncate_html_words(TEST_CONTENT, 10))
+
     def test_slug(self):
         """If a title is given, it should be used to generate the slug."""
         page = Page(**self.page_kwargs)
@@ -83,14 +98,9 @@ class TestPage(unittest.TestCase):
         from datetime import datetime
         from sys import platform
         dt = datetime(2015, 9, 13)
-        # make a deep copy of page_kawgs
-        page_kwargs = dict([(key, self.page_kwargs[key]) for key in
-                            self.page_kwargs])
-        for key in page_kwargs:
-            if not isinstance(page_kwargs[key], dict):
-                break
-            page_kwargs[key] = dict([(subkey, page_kwargs[key][subkey])
-                                     for subkey in page_kwargs[key]])
+
+        page_kwargs = self._copy_page_kwargs()
+
         # set its date to dt
         page_kwargs['metadata']['date'] = dt
         page = Page(**page_kwargs)
@@ -124,3 +134,15 @@ class TestPage(unittest.TestCase):
         # Until we find some other method to test this functionality, we
         # will simply skip this test.
         unittest.skip("There is no locale %s in this system." % locale)
+
+    def _copy_page_kwargs(self):
+        # make a deep copy of page_kwargs
+        page_kwargs = dict([(key, self.page_kwargs[key]) for key in
+                            self.page_kwargs])
+        for key in page_kwargs:
+            if not isinstance(page_kwargs[key], dict):
+                break
+            page_kwargs[key] = dict([(subkey, page_kwargs[key][subkey])
+                                     for subkey in page_kwargs[key]])
+
+        return page_kwargs
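
For anyone trying out the settings this patch documents, a minimal, illustrative pelicanconf.py sketch follows; the author, site name, URL, and the summary length of 25 are placeholders chosen for the example, not defaults shipped with Pelican.

# -*- coding: utf-8 -*-
# pelicanconf.py -- illustrative sketch only; all values are placeholders.

AUTHOR = u'Jane Doe'
SITENAME = u'Example blog'
SITEURL = 'http://example.com'

# Auto-generated article summaries are truncated to this many words when the
# content does not supply its own summary; None copies the full content.
SUMMARY_MAX_LENGTH = 25

# Analytics hooks rendered by the theme templates.
GOOGLE_ANALYTICS = 'UA-XXXX-YYYY'      # Google Analytics property ID
GOSQUARED_SITENAME = 'XXX-YYYYYY-X'    # GoSquared site token (new in this patch)

GOSQUARED_SITENAME only takes effect when the active theme includes the new gosquared.html template, which the simple theme does after this change.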