Merge branch 'master' of git://github.com/ametaireau/pelican
commit 0f21583625
10 changed files with 64 additions and 114 deletions
.gitignore (vendored): 1 change

@@ -2,6 +2,7 @@
.*.swp
.*.swo
*.pyc
.DS_Store
docs/_build
docs/fr/_build
build

@@ -98,6 +98,9 @@ GITHUB_URL :

GOOGLE_ANALYTICS :
    'UA-XXXX-YYYY' to activate Google Analytics;

GOSQUARED_SITENAME :
    'XXX-YYYYYY-X' to activate GoSquared;

JINJA_EXTENSIONS :
    A list of Jinja2 extensions you want to use;

@@ -91,6 +91,12 @@ Setting name (default value)                   What does it do ?
                                               index pages for collections of content e.g. tags and
                                               category index pages.
`PAGINATED_DIRECT_TEMPLATES` (``('index',)``)  Provides the direct templates that should be paginated.
`SUMMARY_MAX_LENGTH` (``50``)                  When creating a short summary of an article, this will
                                               be the default length in words of the text created.
                                               This only applies if your content does not otherwise
                                               specify a summary. Setting to None will cause the summary
                                               to be a copy of the original content.
=============================================  =====================================================================

.. [#] Default is the system locale.
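As a rough illustration of the word-based truncation that `SUMMARY_MAX_LENGTH` describes (this is not Pelican's actual helper; the real code path, `truncate_html_words`, is exercised in the test changes further down and is HTML-aware):

    def naive_summary(text, max_length):
        """Illustrative sketch only: cut plain text to max_length words."""
        if max_length is None:
            # None means the summary is simply a copy of the content.
            return text
        words = text.split()
        if len(words) <= max_length:
            return text
        return ' '.join(words[:max_length]) + ' ...'

    print(naive_summary('one two three four five', 3))  # 'one two three ...'
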
@@ -398,6 +404,7 @@ Setting name           What does it do ?
`GITHUB_URL`           Your GitHub URL (if you have one). It will then
                       use this information to create a GitHub ribbon.
`GOOGLE_ANALYTICS`     'UA-XXXX-YYYY' to activate Google Analytics.
`GOSQUARED_SITENAME`   'XXX-YYYYYY-X' to activate GoSquared.
`MENUITEMS`            A list of tuples (Title, URL) for additional menu
                       items to appear at the beginning of the main menu.
`PIWIK_URL`            URL to your Piwik server - without 'http://' at the
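Taken together, these options live in the user's settings module; a minimal sketch (the file name and the example values are placeholders, only the setting names come from the table above):

    # pelicanconf.py (sketch; values are placeholders)
    GITHUB_URL = 'https://github.com/ametaireau/pelican'  # renders the GitHub ribbon
    GOOGLE_ANALYTICS = 'UA-XXXX-YYYY'                      # enables Google Analytics
    GOSQUARED_SITENAME = 'XXX-YYYYYY-X'                    # enables GoSquared (added in this commit)
    MENUITEMS = (('Blog', '/'), ('About', '/pages/about.html'))
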
@@ -13,11 +13,8 @@ try:
    from markdown import Markdown
except ImportError:
    Markdown = False  # NOQA
import cgi
from HTMLParser import HTMLParser
import re


from pelican.contents import Category, Tag, Author
from pelican.utils import get_date, open

@@ -129,12 +126,13 @@ class MarkdownReader(Reader):
            metadata[name] = self.process_metadata(name, value[0])
        return content, metadata

"""
class HtmlReader(Reader):
    file_extensions = ['html', 'htm']
    _re = re.compile('\<\!\-\-\#\s?[A-z0-9_-]*\s?\:s?[A-z0-9\s_-]*\s?\-\-\>')

    def read(self, filename):
        """Parse content and metadata of (x)HTML files"""
        with open(filename) as content:
            metadata = {'title': 'unnamed'}
            for i in self._re.findall(content):
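For reference, the reader being disabled here extracted metadata from HTML comments matched by the `_re` pattern above. A standalone check of what that pattern picks up (the sample comment is invented for illustration):

    import re

    # Same pattern as the now-commented-out HtmlReader, written as a raw string.
    pattern = re.compile(r'\<\!\-\-\#\s?[A-z0-9_-]*\s?\:s?[A-z0-9\s_-]*\s?\-\-\>')

    sample = '<html><body><!--# title : My first post -->Hello</body></html>'
    print(pattern.findall(sample))  # ['<!--# title : My first post -->']
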
@@ -144,101 +142,6 @@ class HtmlReader(Reader):
                metadata[name] = self.process_metadata(name, value)

        return content, metadata
"""


class PelicanHTMLParser(HTMLParser):
    def __init__(self, settings):
        HTMLParser.__init__(self)
        self.body = ''
        self.metadata = {}
        self.settings = settings

        self._data_buffer = ''

        self._in_top_level = True
        self._in_head = False
        self._in_title = False
        self._in_body = False
        self._in_tags = False

    def handle_starttag(self, tag, attrs):
        if tag == 'head' and self._in_top_level:
            self._in_top_level = False
            self._in_head = True
        elif tag == 'title' and self._in_head:
            self._in_title = True
            self._data_buffer = ''
        elif tag == 'body' and self._in_top_level:
            self._in_top_level = False
            self._in_body = True
            self._data_buffer = ''
        elif tag == 'meta' and self._in_head:
            self._handle_meta_tag(attrs)
        elif self._in_body:
            self._data_buffer += self.build_tag(tag, attrs, False)

    def handle_endtag(self, tag):
        if tag == 'head':
            if self._in_head:
                self._in_head = False
                self._in_top_level = True
        elif tag == 'title':
            self._in_title = False
            self.metadata['title'] = self._data_buffer
        elif tag == 'body':
            self.body = self._data_buffer
            self._in_body = False
            self._in_top_level = True
        elif self._in_body:
            self._data_buffer += '</{}>'.format(cgi.escape(tag))

    def handle_startendtag(self, tag, attrs):
        if tag == 'meta' and self._in_head:
            self._handle_meta_tag(attrs)
        if self._in_body:
            self._data_buffer += self.build_tag(tag, attrs, True)

    def handle_comment(self, data):
        if self._in_body and data.strip() == 'PELICAN_END_SUMMARY':
            self.metadata['summary'] = self._data_buffer

    def handle_data(self, data):
        self._data_buffer += data

    def build_tag(self, tag, attrs, close_tag):
        result = '<{}'.format(cgi.escape(tag))
        result += ''.join((' {}="{}"'.format(cgi.escape(k), cgi.escape(v)) for k, v in attrs))
        if close_tag:
            return result + ' />'
        return result + '>'

    def _handle_meta_tag(self, attrs):
        name = self._attr_value(attrs, 'name')
        contents = self._attr_value(attrs, 'contents', '')
        if name == 'keywords':
            if contents:
                self.metadata['tags'] = [Tag(unicode(tag), self.settings) for tag in contents.split(',')]
        elif name == 'date':
            self.metadata['date'] = get_date(contents)
        else:
            self.metadata[name] = contents

    @classmethod
    def _attr_value(cls, attrs, name, default=None):
        return next((x[1] for x in attrs if x[0] == name), default)


class HTMLReader(Reader):
    file_extensions = ['htm', 'html']
    enabled = True

    def read(self, filename):
        """Parse content and metadata of (x)HTML files"""
        with open(filename) as content:
            parser = PelicanHTMLParser(self.settings)
            parser.feed(content)
            parser.close()
        return parser.body, parser.metadata


_EXTENSIONS = {}

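A self-contained sketch of how this parser-driven reading works. The diff targets Python 2 (`HTMLParser`, `cgi`, `unicode`); the snippet below uses the Python 3 equivalents and plain strings instead of Pelican's `Tag`/`get_date` objects, and keeps only what is needed to show the flow:

    from html.parser import HTMLParser  # Python 3 counterpart of the module used above


    class MiniHTMLMetadataParser(HTMLParser):
        """Simplified take on the PelicanHTMLParser idea: <title> becomes
        metadata['title'], <meta name=... contents=...> becomes metadata[name],
        and text inside <body> becomes the body (tags are dropped here)."""

        def __init__(self):
            HTMLParser.__init__(self)
            self.body = ''
            self.metadata = {}
            self._buffer = ''

        def handle_starttag(self, tag, attrs):
            if tag in ('title', 'body'):
                self._buffer = ''
            elif tag == 'meta':
                attrs = dict(attrs)
                if 'name' in attrs:
                    # Note the non-standard 'contents' attribute, as in the reader above.
                    self.metadata[attrs['name']] = attrs.get('contents', '')

        def handle_endtag(self, tag):
            if tag == 'title':
                self.metadata['title'] = self._buffer
            elif tag == 'body':
                self.body = self._buffer

        def handle_data(self, data):
            self._buffer += data


    doc = ('<html><head><title>My post</title>'
           '<meta name="date" contents="2012-02-01" />'
           '</head><body><p>Hello world.</p></body></html>')

    parser = MiniHTMLMetadataParser()
    parser.feed(doc)
    parser.close()
    print(parser.metadata)  # {'title': 'My post', 'date': '2012-02-01'}
    print(parser.body)      # 'Hello world.'
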
@@ -312,6 +312,7 @@ img.left, figure.left {float: right; margin: 0 0 2em 2em;}
.social a[type$='atom+xml'], .social a[type$='rss+xml'] {background-image: url('../images/icons/rss.png');}
.social a[href*='twitter.com'] {background-image: url('../images/icons/twitter.png');}
.social a[href*='linkedin.com'] {background-image: url('../images/icons/linkedin.png');}
.social a[href*='gitorious.org'] {background-image: url('../images/icons/gitorious.png');}

/*
    About

BIN pelican/themes/notmyidea/static/images/icons/gitorious.png (new file, 3.6 KiB; binary file not shown)

@@ -41,12 +41,12 @@
            </div><!-- /.entry-content -->
        </article></li>
    {% endif %}
    {% if loop.last and (articles_page.has_previous()
            or not articles_page.has_previous() and loop.length > 1) %}
        {% include 'pagination.html' %}
    {% endif %}
    {% if loop.last %}
        </ol><!-- /#posts-list -->
        {% if loop.last and (articles_page.has_previous()
                or not articles_page.has_previous() and loop.length > 1) %}
            {% include 'pagination.html' %}
        {% endif %}
    </section><!-- /#content -->
    {% endif %}
{% endfor %}

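The include condition above relies on operator precedence (`and` binds tighter than `or` in Jinja2, as in Python), so the parenthesised part reads: pagination is included when there are previous pages, or when there are none but the page holds more than one article. A quick Python check with stand-in names mirroring the template:

    # Stand-ins for articles_page.has_previous() and loop.length in the template above.
    def show_pagination(has_previous, length):
        # Parentheses added only to make the precedence explicit; same meaning as the template.
        return has_previous or (not has_previous and length > 1)

    for has_previous in (False, True):
        for length in (1, 2):
            print(has_previous, length, '->', show_pagination(has_previous, length))
    # False 1 -> False   (a single article and no earlier pages: no pagination block)
    # False 2 -> True
    # True  1 -> True
    # True  2 -> True
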
pelican/themes/simple/templates/gosquared.html (new file, 14 lines)

@@ -0,0 +1,14 @@
{% if GOSQUARED_SITENAME %}
<script type="text/javascript">
var GoSquared={};
GoSquared.acct = "{{ GOSQUARED_SITENAME }}";
(function(w){
  function gs(){
    w._gstc_lt=+(new Date); var d=document;
    var g = d.createElement("script"); g.type = "text/javascript"; g.async = true; g.src = "//d1l6p2sc9645hc.cloudfront.net/tracker.js";
    var s = d.getElementsByTagName("script")[0]; s.parentNode.insertBefore(g, s);
  }
  w.addEventListener?w.addEventListener("load",gs,false):w.attachEvent("onload",gs);
})(window);
</script>
{% endif %}

@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
import contextlib
import os
import re
import pytz

@@ -34,10 +33,10 @@ def get_date(string):
        pass
    raise ValueError("'%s' is not a valid date" % string)


@contextlib.contextmanager
def open(filename):
    """Open a file and return its content"""
    yield _open(filename, encoding='utf-8').read()
    return _open(filename, encoding='utf-8').read()


def slugify(value):

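The hunk above replaces the generator-based context manager (decorated with `contextlib.contextmanager` and used as `with open(path) as content:`) with a plain function that simply returns the file's text. A minimal sketch of the two shapes, using a throwaway name so it does not shadow the built-in `open` and a stand-in for whatever encoding-aware opener is aliased as `_open`:

    import contextlib
    import io

    _open = io.open  # stand-in for the encoding-aware opener aliased as _open


    @contextlib.contextmanager
    def read_text_cm(filename):
        """Old shape: a context manager that yields the file's content."""
        yield _open(filename, encoding='utf-8').read()


    def read_text(filename):
        """New shape: a plain function that returns the content directly."""
        return _open(filename, encoding='utf-8').read()


    # Old call sites:   with read_text_cm('article.rst') as content: ...
    # New call sites:   content = read_text('article.rst')
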
@@ -4,6 +4,7 @@ from .support import unittest

from pelican.contents import Page
from pelican.settings import _DEFAULT_CONFIG
from pelican.utils import truncate_html_words

from jinja2.utils import generate_lorem_ipsum

@@ -48,6 +49,20 @@ class TestPage(unittest.TestCase):
        page = Page(**self.page_kwargs)
        self.assertEqual(page.summary, TEST_SUMMARY)

    def test_summary_max_length(self):
        """If a :SUMMARY_MAX_LENGTH: is set, and there is no other summary,
        the generated summary should not exceed the given length."""
        page_kwargs = self._copy_page_kwargs()
        settings = _DEFAULT_CONFIG.copy()
        page_kwargs['settings'] = settings
        del page_kwargs['metadata']['summary']
        settings['SUMMARY_MAX_LENGTH'] = None
        page = Page(**page_kwargs)
        self.assertEqual(page.summary, TEST_CONTENT)
        settings['SUMMARY_MAX_LENGTH'] = 10
        page = Page(**page_kwargs)
        self.assertEqual(page.summary, truncate_html_words(TEST_CONTENT, 10))

    def test_slug(self):
        """If a title is given, it should be used to generate the slug."""
        page = Page(**self.page_kwargs)

@@ -83,14 +98,9 @@ class TestPage(unittest.TestCase):
        from datetime import datetime
        from sys import platform
        dt = datetime(2015, 9, 13)

        # make a deep copy of page_kawgs
        page_kwargs = dict([(key, self.page_kwargs[key]) for key in
                            self.page_kwargs])
        for key in page_kwargs:
            if not isinstance(page_kwargs[key], dict):
                break
            page_kwargs[key] = dict([(subkey, page_kwargs[key][subkey])
                                     for subkey in page_kwargs[key]])

        page_kwargs = self._copy_page_kwargs()

        # set its date to dt
        page_kwargs['metadata']['date'] = dt
        page = Page(**page_kwargs)

@@ -124,3 +134,15 @@ class TestPage(unittest.TestCase):
        # Until we find some other method to test this functionality, we
        # will simply skip this test.
        unittest.skip("There is no locale %s in this system." % locale)

    def _copy_page_kwargs(self):
        # make a deep copy of page_kwargs
        page_kwargs = dict([(key, self.page_kwargs[key]) for key in
                            self.page_kwargs])
        for key in page_kwargs:
            if not isinstance(page_kwargs[key], dict):
                break
            page_kwargs[key] = dict([(subkey, page_kwargs[key][subkey])
                                     for subkey in page_kwargs[key]])

        return page_kwargs
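The new helper hand-rolls a two-level copy of `self.page_kwargs`. Assuming the values are plain dicts and strings, as in these tests, the standard library's `copy.deepcopy` would give an equivalent result, so the manual version is mainly a stylistic choice:

    import copy

    page_kwargs = {'metadata': {'summary': 'short', 'title': 'T'}, 'content': 'body'}

    copied = copy.deepcopy(page_kwargs)
    copied['metadata']['summary'] = 'changed'

    assert page_kwargs['metadata']['summary'] == 'short'  # the nested dict was really copied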