Merge pull request #667 from wking/page-path

Consolidate Page.filename and StaticContent.filepath as `source_path`.
Bruno Binet 2013-01-18 07:30:03 -08:00
commit 6233f5a409
12 changed files with 213 additions and 130 deletions
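For downstream code (plugins, themes, custom readers) the practical effect is that the old attribute names keep working but log a deprecation warning on every access. Below is a minimal, self-contained sketch of that pattern, mirroring the deprecated_attribute decorator this commit adds to pelican.utils; the stripped-down Page class and the example path are illustrative only.

import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)


def deprecated_attribute(old, new, since):
    # Simplified version of the decorator added to pelican.utils in this commit:
    # reads and writes of the old attribute are forwarded to the new one,
    # with a warning logged on every access.
    def _warn():
        logger.warning('{} has been deprecated since {}. Use {} instead.'.format(
            old, '.'.join(str(x) for x in since), new))

    def fget(self):
        _warn()
        return getattr(self, new)

    def fset(self, value):
        _warn()
        setattr(self, new, value)

    def decorator(dummy):
        return property(fget=fget, fset=fset)
    return decorator


class Page(object):
    # The old name survives as a deprecated alias; the dummy body is ignored.
    @deprecated_attribute(old='filename', new='source_path', since=(3, 2, 0))
    def filename():
        return None

    def __init__(self, source_path=None):
        self.source_path = source_path


page = Page(source_path='content/blog/first-post.rst')
print(page.filename)     # warns, then prints the value of page.source_path
print(page.source_path)  # preferred spelling, no warning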


@ -47,19 +47,17 @@ Take a look at the Markdown reader::
class MarkdownReader(Reader):
enabled = bool(Markdown)
def read(self, filename):
def read(self, source_path):
"""Parse content and metadata of markdown files"""
text = open(filename)
text = pelican_open(source_path)
md = Markdown(extensions = ['meta', 'codehilite'])
content = md.convert(text)
metadata = {}
for name, value in md.Meta.items():
if name in _METADATA_FIELDS:
meta = _METADATA_FIELDS[name](value[0])
else:
meta = value[0]
metadata[name.lower()] = meta
name = name.lower()
meta = self.process_metadata(name, value[0])
metadata[name] = meta
return content, metadata
Simple, isn't it?
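For third-party readers, the only signature change is that read() now receives a source_path argument. Here is a hedged sketch of a custom reader under the new signature; JsonReader, its file extension, and the JSON layout are hypothetical, and registration of the reader is omitted.

import json

from pelican.readers import Reader
from pelican.utils import pelican_open


class JsonReader(Reader):
    enabled = True
    file_extensions = ['json']

    def read(self, source_path):
        """Parse content and metadata of JSON files."""
        data = json.loads(pelican_open(source_path))
        metadata = {}
        for name, value in data.get('meta', {}).items():
            # process_metadata() normalizes known fields (dates, tags, ...)
            name = name.lower()
            metadata[name] = self.process_metadata(name, value)
        return data.get('content', ''), metadata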


@ -15,7 +15,7 @@ from sys import platform, stdin
from pelican.settings import _DEFAULT_CONFIG
from pelican.utils import (slugify, truncate_html_words, memoized,
python_2_unicode_compatible)
python_2_unicode_compatible, deprecated_attribute)
from pelican import signals
import pelican.utils
@ -31,8 +31,12 @@ class Page(object):
mandatory_properties = ('title',)
default_template = 'page'
@deprecated_attribute(old='filename', new='source_path', since=(3, 2, 0))
def filename():
return None
def __init__(self, content, metadata=None, settings=None,
filename=None, context=None):
source_path=None, context=None):
# init parameters
if not metadata:
metadata = {}
@ -75,8 +79,8 @@ class Page(object):
if not hasattr(self, 'slug') and hasattr(self, 'title'):
self.slug = slugify(self.title)
if filename:
self.filename = filename
if source_path:
self.source_path = source_path
# manage the date format
if not hasattr(self, 'date_format'):
@ -160,8 +164,8 @@ class Page(object):
if value.startswith('/'):
value = value[1:]
else:
# relative to the filename of this content
value = self.get_relative_filename(
# relative to the source path of this content
value = self.get_relative_source_path(
os.path.join(self.relative_dir, value)
)
@ -215,24 +219,25 @@ class Page(object):
else:
return self.default_template
def get_relative_filename(self, filename=None):
def get_relative_source_path(self, source_path=None):
"""Return the relative path (from the content path) to the given
filename.
source_path.
If no filename is specified, use the filename of this content object.
If no source path is specified, use the source path of this
content object.
"""
if not filename:
filename = self.filename
if not source_path:
source_path = self.source_path
return os.path.relpath(
os.path.abspath(os.path.join(self.settings['PATH'], filename)),
os.path.abspath(os.path.join(self.settings['PATH'], source_path)),
os.path.abspath(self.settings['PATH'])
)
@property
def relative_dir(self):
return os.path.dirname(os.path.relpath(
os.path.abspath(self.filename),
os.path.abspath(self.source_path),
os.path.abspath(self.settings['PATH']))
)
@ -300,16 +305,20 @@ class Author(URLWrapper):
@python_2_unicode_compatible
class StaticContent(object):
@deprecated_attribute(old='filepath', new='source_path', since=(3, 2, 0))
def filepath():
return None
def __init__(self, src, dst=None, settings=None):
if not settings:
settings = copy.deepcopy(_DEFAULT_CONFIG)
self.src = src
self.url = dst or src
self.filepath = os.path.join(settings['PATH'], src)
self.source_path = os.path.join(settings['PATH'], src)
self.save_as = os.path.join(settings['OUTPUT_PATH'], self.url)
def __str__(self):
return self.filepath
return self.source_path
def is_valid_content(content, f):
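Both renamed attributes on StaticContent are derived exactly as before, only under the new name; a quick worked example with illustrative settings values:

import os

settings = {'PATH': 'content', 'OUTPUT_PATH': 'output'}
src, dst = 'images/logo.png', None    # dst defaults to src when not given

source_path = os.path.join(settings['PATH'], src)             # content/images/logo.png
save_as = os.path.join(settings['OUTPUT_PATH'], dst or src)   # output/images/logo.png
print(source_path, save_as)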


@ -108,8 +108,8 @@ class Generator(object):
files.append(os.sep.join((root, f)))
return files
def add_filename(self, content):
location = os.path.relpath(os.path.abspath(content.filename),
def add_source_path(self, content):
location = os.path.relpath(os.path.abspath(content.source_path),
os.path.abspath(self.path))
self.context['filenames'][location] = content
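The renamed helper still keys the shared context['filenames'] mapping (the context key itself is unchanged) by the content's path relative to the content directory; a standalone illustration with made-up paths:

import os

path = 'content'                              # the generator's content path
source_path = 'content/blog/first-post.rst'   # an article's source_path

location = os.path.relpath(os.path.abspath(source_path), os.path.abspath(path))
print(location)   # blog/first-post.rst -- the key used in context['filenames']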
@ -352,11 +352,11 @@ class ArticlesGenerator(Generator):
signals.article_generate_context.send(self, metadata=metadata)
article = Article(content, metadata, settings=self.settings,
filename=f, context=self.context)
source_path=f, context=self.context)
if not is_valid_content(article, f):
continue
self.add_filename(article)
self.add_source_path(article)
if article.status == "published":
if hasattr(article, 'tags'):
@ -455,11 +455,11 @@ class PagesGenerator(Generator):
continue
signals.pages_generate_context.send(self, metadata=metadata)
page = Page(content, metadata, settings=self.settings,
filename=f, context=self.context)
source_path=f, context=self.context)
if not is_valid_content(page, f):
continue
self.add_filename(page)
self.add_source_path(page)
if page.status == "published":
all_pages.append(page)
@ -520,8 +520,8 @@ class StaticGenerator(Generator):
# copy all StaticContent files
for sc in self.staticfiles:
mkdir_p(os.path.dirname(sc.save_as))
shutil.copy(sc.filepath, sc.save_as)
logger.info('copying %s to %s' % (sc.filepath, sc.save_as))
shutil.copy(sc.source_path, sc.save_as)
logger.info('copying {} to {}'.format(sc.source_path, sc.save_as))
class PdfGenerator(Generator):
@ -544,11 +544,11 @@ class PdfGenerator(Generator):
raise Exception("unable to find rst2pdf")
def _create_pdf(self, obj, output_path):
if obj.filename.endswith(".rst"):
if obj.source_path.endswith('.rst'):
filename = obj.slug + ".pdf"
output_pdf = os.path.join(output_path, filename)
# print "Generating pdf for", obj.filename, " in ", output_pdf
with open(obj.filename) as f:
# print('Generating pdf for', obj.source_path, 'in', output_pdf)
with open(obj.source_path) as f:
self.pdfcreator.createPdf(text=f.read(), output=output_pdf)
logger.info(' [ok] writing %s' % output_pdf)
@ -578,9 +578,9 @@ class SourceFileGenerator(Generator):
self.output_extension = self.settings['OUTPUT_SOURCES_EXTENSION']
def _create_source(self, obj, output_path):
filename = os.path.splitext(obj.save_as)[0]
dest = os.path.join(output_path, filename + self.output_extension)
copy('', obj.filename, dest)
output_path = os.path.splitext(obj.save_as)[0]
dest = os.path.join(output_path, output_path + self.output_extension)
copy('', obj.source_path, dest)
def generate_output(self, writer=None):
logger.info(' Generating source files...')


@ -109,20 +109,20 @@ class RstReader(Reader):
output[name] = self.process_metadata(name, value)
return output
def _get_publisher(self, filename):
def _get_publisher(self, source_path):
extra_params = {'initial_header_level': '2'}
pub = docutils.core.Publisher(
destination_class=docutils.io.StringOutput)
pub.set_components('standalone', 'restructuredtext', 'html')
pub.writer.translator_class = PelicanHTMLTranslator
pub.process_programmatic_settings(None, extra_params, None)
pub.set_source(source_path=filename)
pub.set_source(source_path=source_path)
pub.publish()
return pub
def read(self, filename):
def read(self, source_path):
"""Parses restructured text"""
pub = self._get_publisher(filename)
pub = self._get_publisher(source_path)
parts = pub.writer.parts
content = parts.get('body')
@ -151,9 +151,9 @@ class MarkdownReader(Reader):
output[name] = self.process_metadata(name, value[0])
return output
def read(self, filename):
def read(self, source_path):
"""Parse content and metadata of markdown files"""
text = pelican_open(filename)
text = pelican_open(source_path)
md = Markdown(extensions=set(self.extensions + ['meta']))
content = md.convert(text)
@ -165,9 +165,9 @@ class HtmlReader(Reader):
file_extensions = ['html', 'htm']
_re = re.compile('\<\!\-\-\#\s?[A-z0-9_-]*\s?\:s?[A-z0-9\s_-]*\s?\-\-\>')
def read(self, filename):
def read(self, source_path):
"""Parse content and metadata of (x)HTML files"""
with open(filename) as content:
with open(source_path) as content:
metadata = {'title': 'unnamed'}
for i in self._re.findall(content):
key = i.split(':')[0][5:].strip()
@ -183,10 +183,10 @@ class AsciiDocReader(Reader):
file_extensions = ['asc']
default_options = ["--no-header-footer", "-a newline=\\n"]
def read(self, filename):
def read(self, source_path):
"""Parse content and metadata of asciidoc files"""
from cStringIO import StringIO
text = StringIO(pelican_open(filename))
text = StringIO(pelican_open(source_path))
content = StringIO()
ad = AsciiDocAPI()
@ -216,14 +216,14 @@ for cls in Reader.__subclasses__():
_EXTENSIONS[ext] = cls
def read_file(filename, fmt=None, settings=None):
def read_file(path, fmt=None, settings=None):
"""Return a reader object using the given format."""
base, ext = os.path.splitext(os.path.basename(filename))
base, ext = os.path.splitext(os.path.basename(path))
if not fmt:
fmt = ext[1:]
if fmt not in _EXTENSIONS:
raise TypeError('Pelican does not know how to parse %s' % filename)
raise TypeError('Pelican does not know how to parse {}'.format(path))
reader = _EXTENSIONS[fmt](settings)
settings_key = '%s_EXTENSIONS' % fmt.upper()
@ -234,7 +234,7 @@ def read_file(filename, fmt=None, settings=None):
if not reader.enabled:
raise ValueError("Missing dependencies for %s" % fmt)
content, metadata = reader.read(filename)
content, metadata = reader.read(path)
# eventually filter the content with typogrify if asked so
if settings and settings.get('TYPOGRIFY'):
@ -242,9 +242,9 @@ def read_file(filename, fmt=None, settings=None):
content = typogrify(content)
metadata['title'] = typogrify(metadata['title'])
filename_metadata = settings and settings.get('FILENAME_METADATA')
if filename_metadata:
match = re.match(filename_metadata, base)
file_metadata = settings and settings.get('FILENAME_METADATA')
if file_metadata:
match = re.match(file_metadata, base)
if match:
# .items() for py3k compat.
for k, v in match.groupdict().items():
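read_file() matches FILENAME_METADATA against the extension-less base name and folds any named groups into the metadata. A standalone illustration follows; the regex and file name are borrowed from the reader tests further down in this commit.

import re

# base = os.path.splitext(os.path.basename(path))[0]
base = '2012-11-29_rst_w_filename_meta#foo-bar'
file_metadata = r'(?P<date>\d{4}-\d{2}-\d{2}).*'

match = re.match(file_metadata, base)
if match:
    for k, v in match.groupdict().items():
        print(k, v)   # date 2012-11-29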


@ -84,15 +84,15 @@ _DEFAULT_CONFIG = {'PATH': '.',
}
def read_settings(filename=None, override=None):
if filename:
local_settings = get_settings_from_file(filename)
def read_settings(path=None, override=None):
if path:
local_settings = get_settings_from_file(path)
# Make the paths relative to the settings file
for p in ['PATH', 'OUTPUT_PATH', 'THEME']:
if p in local_settings and local_settings[p] is not None \
and not isabs(local_settings[p]):
absp = os.path.abspath(os.path.normpath(os.path.join(
os.path.dirname(filename), local_settings[p])))
os.path.dirname(path), local_settings[p])))
if p != 'THEME' or os.path.exists(absp):
local_settings[p] = absp
else:
@ -116,14 +116,14 @@ def get_settings_from_module(module=None, default_settings=_DEFAULT_CONFIG):
return context
def get_settings_from_file(filename, default_settings=_DEFAULT_CONFIG):
def get_settings_from_file(path, default_settings=_DEFAULT_CONFIG):
"""
Load settings from a file path, returning a dict.
"""
name = os.path.basename(filename).rpartition(".")[0]
module = imp.load_source(name, filename)
name = os.path.basename(path).rpartition('.')[0]
module = imp.load_source(name, path)
return get_settings_from_module(module, default_settings=default_settings)
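Call sites simply swap the keyword: read_settings(path=...) instead of read_settings(filename=...). A minimal usage sketch matching the functional tests below; the settings file name is illustrative.

from pelican.settings import read_settings

settings = read_settings(path='pelicanconf.py', override={
    'OUTPUT_PATH': 'output',
})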


@ -6,6 +6,7 @@ import os
import re
import pytz
import shutil
import traceback
import logging
import errno
import locale
@ -122,6 +123,48 @@ class memoized(object):
'''Support instance methods.'''
return partial(self.__call__, obj)
def deprecated_attribute(old, new, since=None, remove=None, doc=None):
"""Attribute deprecation decorator for gentle upgrades
For example:
class MyClass (object):
@deprecated_attribute(
old='abc', new='xyz', since=(3, 2, 0), remove=(4, 1, 3))
def abc(): return None
def __init__(self):
xyz = 5
Note that the decorator needs a dummy method to attach to, but the
content of the dummy method is ignored.
"""
def _warn():
version = '.'.join(six.text_type(x) for x in since)
message = ['{} has been deprecated since {}'.format(old, version)]
if remove:
version = '.'.join(six.text_type(x) for x in remove)
message.append(
' and will be removed by version {}'.format(version))
message.append('. Use {} instead.'.format(new))
logger.warning(''.join(message))
logger.debug(''.join(
six.text_type(x) for x in traceback.format_stack()))
def fget(self):
_warn()
return getattr(self, new)
def fset(self, value):
_warn()
setattr(self, new, value)
def decorator(dummy):
return property(fget=fget, fset=fset, doc=doc)
return decorator
def get_date(string):
"""Return a datetime object from a string.
@ -141,9 +184,9 @@ def get_date(string):
raise ValueError("'%s' is not a valid date" % string)
def pelican_open(filename):
def pelican_open(path):
"""Open a file and return it's content"""
return open(filename, encoding='utf-8').read()
return open(path, encoding='utf-8').read()
def slugify(value):
@ -245,9 +288,9 @@ def clean_output_dir(path):
logger.error("Unable to delete %s, file type unknown" % file)
def get_relative_path(filename):
"""Return the relative path from the given filename to the root path."""
nslashes = filename.count('/')
def get_relative_path(path):
"""Return the relative path from the given path to the root path."""
nslashes = path.count('/')
if nslashes == 0:
return '.'
else:
@ -344,15 +387,16 @@ def process_translations(content_list):
if len_ > 1:
logger.warning('there are %s variants of "%s"' % (len_, slug))
for x in default_lang_items:
logger.warning(' %s' % x.filename)
logger.warning(' {}'.format(x.source_path))
elif len_ == 0:
default_lang_items = items[:1]
if not slug:
msg = 'empty slug for %r. ' % default_lang_items[0].filename\
+ 'You can fix this by adding a title or a slug to your '\
+ 'content'
logger.warning(msg)
logger.warning((
'empty slug for {!r}. '
'You can fix this by adding a title or a slug to your '
'content'
).format(default_lang_items[0].source_path))
index.extend(default_lang_items)
translations.extend([x for x in items if x not in default_lang_items])
for a in items:
@ -388,14 +432,14 @@ def files_changed(path, extensions):
FILENAMES_MTIMES = defaultdict(int)
def file_changed(filename):
mtime = os.stat(filename).st_mtime
if FILENAMES_MTIMES[filename] == 0:
FILENAMES_MTIMES[filename] = mtime
def file_changed(path):
mtime = os.stat(path).st_mtime
if FILENAMES_MTIMES[path] == 0:
FILENAMES_MTIMES[path] = mtime
return False
else:
if mtime > FILENAMES_MTIMES[filename]:
FILENAMES_MTIMES[filename] = mtime
if mtime > FILENAMES_MTIMES[path]:
FILENAMES_MTIMES[path] = mtime
return True
return False


@ -46,23 +46,23 @@ class Writer(object):
pubdate=set_date_tzinfo(item.date,
self.settings.get('TIMEZONE', None)))
def write_feed(self, elements, context, filename=None, feed_type='atom'):
def write_feed(self, elements, context, path=None, feed_type='atom'):
"""Generate a feed with the list of articles provided
Return the feed. If no output_path or filename is specified, just
Return the feed. If no path or output_path is specified, just
return the feed object.
:param elements: the articles to put on the feed.
:param context: the context to get the feed metadata.
:param filename: the filename to output.
:param path: the path to output.
:param feed_type: the feed type to use (atom or rss)
"""
old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, str('C'))
try:
self.site_url = context.get('SITEURL', get_relative_path(filename))
self.site_url = context.get('SITEURL', get_relative_path(path))
self.feed_domain = context.get('FEED_DOMAIN')
self.feed_url = '%s/%s' % (self.feed_domain, filename)
self.feed_url = '{}/{}'.format(self.feed_domain, path)
feed = self._create_new_feed(feed_type, context)
@ -72,8 +72,8 @@ class Writer(object):
for i in range(max_items):
self._add_item_to_the_feed(feed, elements[i])
if filename:
complete_path = os.path.join(self.output_path, filename)
if path:
complete_path = os.path.join(self.output_path, path)
try:
os.makedirs(os.path.dirname(complete_path))
except Exception:
@ -114,14 +114,14 @@ class Writer(object):
output = template.render(localcontext)
finally:
locale.setlocale(locale.LC_ALL, old_locale)
filename = os.sep.join((output_path, name))
path = os.path.join(output_path, name)
try:
os.makedirs(os.path.dirname(filename))
os.makedirs(os.path.dirname(path))
except Exception:
pass
with open(filename, 'w', encoding='utf-8') as f:
with open(path, 'w', encoding='utf-8') as f:
f.write(output)
logger.info('writing %s' % filename)
logger.info('writing {}'.format(path))
localcontext = context.copy()
if relative_urls:


@ -176,3 +176,24 @@ class LogCountHandler(BufferingHandler):
if (msg is None or re.match(msg, l.getMessage()))
and (level is None or l.levelno == level)
])
class LoggedTestCase(unittest.TestCase):
"""A test case that captures log messages
"""
def setUp(self):
super(LoggedTestCase, self).setUp()
self._logcount_handler = LogCountHandler()
logging.getLogger().addHandler(self._logcount_handler)
def tearDown(self):
logging.getLogger().removeHandler(self._logcount_handler)
super(LoggedTestCase, self).tearDown()
def assertLogCountEqual(self, count=None, msg=None, **kwargs):
actual = self._logcount_handler.count_logs(msg=msg, **kwargs)
self.assertEqual(
actual, count,
msg='expected {} occurrences of {!r}, but found {}'.format(
count, msg, actual))


@ -229,20 +229,20 @@ class TestTemplatePagesGenerator(unittest.TestCase):
# create a dummy template file
template_dir = os.path.join(self.temp_content, 'template')
template_filename = os.path.join(template_dir, 'source.html')
template_path = os.path.join(template_dir, 'source.html')
os.makedirs(template_dir)
with open(template_filename, 'w') as template_file:
with open(template_path, 'w') as template_file:
template_file.write(self.TEMPLATE_CONTENT)
writer = Writer(self.temp_output, settings=settings)
generator.generate_output(writer)
output_filename = os.path.join(
output_path = os.path.join(
self.temp_output, 'generated', 'file.html')
# output file has been generated
self.assertTrue(os.path.exists(output_filename))
self.assertTrue(os.path.exists(output_path))
# output content is correct
with open(output_filename, 'r') as output_file:
with open(output_path, 'r') as output_file:
self.assertEquals(output_file.read(), 'foo: bar')


@ -1,9 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
try:
import unittest2 as unittest
except ImportError:
import unittest # NOQA
import os
from filecmp import dircmp
@ -14,7 +10,7 @@ import logging
from pelican import Pelican
from pelican.settings import read_settings
from .support import LogCountHandler
from .support import LoggedTestCase
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
SAMPLES_PATH = os.path.abspath(os.sep.join((CURRENT_DIR, "..", "samples")))
@ -39,13 +35,12 @@ def recursiveDiff(dcmp):
return diff
class TestPelican(unittest.TestCase):
class TestPelican(LoggedTestCase):
# general functional testing for pelican. Basically, this test case tries
# to run pelican in different situations and see how it behaves
def setUp(self):
self.logcount_handler = LogCountHandler()
logging.getLogger().addHandler(self.logcount_handler)
super(TestPelican, self).setUp()
self.temp_path = mkdtemp()
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, str('C'))
@ -53,7 +48,7 @@ class TestPelican(unittest.TestCase):
def tearDown(self):
rmtree(self.temp_path)
locale.setlocale(locale.LC_ALL, self.old_locale)
logging.getLogger().removeHandler(self.logcount_handler)
super(TestPelican, self).tearDown()
def assertFilesEqual(self, diff):
msg = "some generated files differ from the expected functional " \
@ -70,7 +65,7 @@ class TestPelican(unittest.TestCase):
def test_basic_generation_works(self):
# when running pelican without settings, it should pick up the default
# ones and generate correct output without raising any exception
settings = read_settings(filename=None, override={
settings = read_settings(path=None, override={
'PATH': INPUT_PATH,
'OUTPUT_PATH': self.temp_path,
'LOCALE': locale.normalize('en_US'),
@ -79,14 +74,14 @@ class TestPelican(unittest.TestCase):
pelican.run()
dcmp = dircmp(self.temp_path, os.sep.join((OUTPUT_PATH, "basic")))
self.assertFilesEqual(recursiveDiff(dcmp))
self.assertEqual(self.logcount_handler.count_logs(
self.assertLogCountEqual(
count=10,
msg="Unable to find.*skipping url replacement",
level=logging.WARNING,
), 10, msg="bad number of occurences found for this log")
level=logging.WARNING)
def test_custom_generation_works(self):
# the same thing with a specified set of settings should work
settings = read_settings(filename=SAMPLE_CONFIG, override={
settings = read_settings(path=SAMPLE_CONFIG, override={
'PATH': INPUT_PATH,
'OUTPUT_PATH': self.temp_path,
'LOCALE': locale.normalize('en_US'),


@ -11,7 +11,7 @@ CUR_DIR = os.path.dirname(__file__)
CONTENT_PATH = os.path.join(CUR_DIR, 'content')
def _filename(*args):
def _path(*args):
return os.path.join(CONTENT_PATH, *args)
@ -19,7 +19,7 @@ class RstReaderTest(unittest.TestCase):
def test_article_with_metadata(self):
reader = readers.RstReader({})
content, metadata = reader.read(_filename('article_with_metadata.rst'))
content, metadata = reader.read(_path('article_with_metadata.rst'))
expected = {
'category': 'yeah',
'author': 'Alexis Métaireau',
@ -37,7 +37,7 @@ class RstReaderTest(unittest.TestCase):
def test_article_with_filename_metadata(self):
content, metadata = readers.read_file(
_filename('2012-11-29_rst_w_filename_meta#foo-bar.rst'),
_path('2012-11-29_rst_w_filename_meta#foo-bar.rst'),
settings={})
expected = {
'category': 'yeah',
@ -48,7 +48,7 @@ class RstReaderTest(unittest.TestCase):
self.assertEquals(value, expected[key], key)
content, metadata = readers.read_file(
_filename('2012-11-29_rst_w_filename_meta#foo-bar.rst'),
_path('2012-11-29_rst_w_filename_meta#foo-bar.rst'),
settings={
'FILENAME_METADATA': '(?P<date>\d{4}-\d{2}-\d{2}).*'
})
@ -62,7 +62,7 @@ class RstReaderTest(unittest.TestCase):
self.assertEquals(value, expected[key], key)
content, metadata = readers.read_file(
_filename('2012-11-29_rst_w_filename_meta#foo-bar.rst'),
_path('2012-11-29_rst_w_filename_meta#foo-bar.rst'),
settings={
'FILENAME_METADATA': '(?P<date>\d{4}-\d{2}-\d{2})_' \
'_(?P<Slug>.*)' \
@ -82,7 +82,7 @@ class RstReaderTest(unittest.TestCase):
def test_article_metadata_key_lowercase(self):
"""Keys of metadata should be lowercase."""
reader = readers.RstReader({})
content, metadata = reader.read(_filename('article_with_uppercase_metadata.rst'))
content, metadata = reader.read(_path('article_with_uppercase_metadata.rst'))
self.assertIn('category', metadata, "Key should be lowercase.")
self.assertEquals('Yeah', metadata.get('category'), "Value keeps cases.")
@ -90,7 +90,7 @@ class RstReaderTest(unittest.TestCase):
def test_typogrify(self):
# if nothing is specified in the settings, the content should be
# unmodified
content, _ = readers.read_file(_filename('article.rst'))
content, _ = readers.read_file(_path('article.rst'))
expected = "<p>This is some content. With some stuff to "\
"&quot;typogrify&quot;.</p>\n<p>Now with added "\
'support for <abbr title="three letter acronym">'\
@ -100,7 +100,7 @@ class RstReaderTest(unittest.TestCase):
try:
# otherwise, typogrify should be applied
content, _ = readers.read_file(_filename('article.rst'),
content, _ = readers.read_file(_path('article.rst'),
settings={'TYPOGRIFY': True})
expected = "<p>This is some content. With some stuff to&nbsp;"\
"&#8220;typogrify&#8221;.</p>\n<p>Now with added "\
@ -118,7 +118,7 @@ class MdReaderTest(unittest.TestCase):
def test_article_with_md_extension(self):
# test to ensure the md extension is being processed by the correct reader
reader = readers.MarkdownReader({})
content, metadata = reader.read(_filename('article_with_md_extension.md'))
content, metadata = reader.read(_path('article_with_md_extension.md'))
expected = "<h1>Test Markdown File Header</h1>\n"\
"<h2>Used for pelican test</h2>\n"\
"<p>The quick brown fox jumped over the lazy dog's back.</p>"
@ -136,7 +136,7 @@ class MdReaderTest(unittest.TestCase):
def test_article_with_mkd_extension(self):
# test to ensure the mkd extension is being processed by the correct reader
reader = readers.MarkdownReader({})
content, metadata = reader.read(_filename('article_with_mkd_extension.mkd'))
content, metadata = reader.read(_path('article_with_mkd_extension.mkd'))
expected = "<h1>Test Markdown File Header</h1>\n"\
"<h2>Used for pelican test</h2>\n"\
"<p>This is another markdown test file. Uses the mkd extension.</p>"
@ -147,7 +147,7 @@ class MdReaderTest(unittest.TestCase):
def test_article_with_markdown_markup_extension(self):
# test to ensure the markdown markup extension is being processed as expected
content, metadata = readers.read_file(
_filename('article_with_markdown_markup_extensions.md'),
_path('article_with_markdown_markup_extensions.md'),
settings={'MD_EXTENSIONS': ['toc', 'codehilite', 'extra']})
expected = '<div class="toc">\n'\
'<ul>\n'\
@ -165,7 +165,7 @@ class MdReaderTest(unittest.TestCase):
@unittest.skipUnless(readers.Markdown, "markdown isn't installed")
def test_article_with_filename_metadata(self):
content, metadata = readers.read_file(
_filename('2012-11-30_md_w_filename_meta#foo-bar.md'),
_path('2012-11-30_md_w_filename_meta#foo-bar.md'),
settings={})
expected = {
'category': 'yeah',
@ -175,7 +175,7 @@ class MdReaderTest(unittest.TestCase):
self.assertEquals(value, metadata[key], key)
content, metadata = readers.read_file(
_filename('2012-11-30_md_w_filename_meta#foo-bar.md'),
_path('2012-11-30_md_w_filename_meta#foo-bar.md'),
settings={
'FILENAME_METADATA': '(?P<date>\d{4}-\d{2}-\d{2}).*'
})
@ -188,7 +188,7 @@ class MdReaderTest(unittest.TestCase):
self.assertEquals(value, metadata[key], key)
content, metadata = readers.read_file(
_filename('2012-11-30_md_w_filename_meta#foo-bar.md'),
_path('2012-11-30_md_w_filename_meta#foo-bar.md'),
settings={
'FILENAME_METADATA': '(?P<date>\d{4}-\d{2}-\d{2})'
'_(?P<Slug>.*)'
@ -210,7 +210,7 @@ class AdReaderTest(unittest.TestCase):
def test_article_with_asc_extension(self):
# test to ensure the asc extension is being processed by the correct reader
reader = readers.AsciiDocReader({})
content, metadata = reader.read(_filename('article_with_asc_extension.asc'))
content, metadata = reader.read(_path('article_with_asc_extension.asc'))
expected = '<hr>\n<h2><a name="_used_for_pelican_test"></a>Used for pelican test</h2>\n'\
'<p>The quick brown fox jumped over the lazy dog&#8217;s back.</p>\n'
self.assertEqual(content, expected)
@ -241,7 +241,7 @@ class AdReaderTest(unittest.TestCase):
def test_article_with_asc_options(self):
# test to ensure the ASCIIDOC_OPTIONS is being used
reader = readers.AsciiDocReader(dict(ASCIIDOC_OPTIONS=["-a revision=1.0.42"]))
content, metadata = reader.read(_filename('article_with_asc_options.asc'))
content, metadata = reader.read(_path('article_with_asc_options.asc'))
expected = '<hr>\n<h2><a name="_used_for_pelican_test"></a>Used for pelican test</h2>\n'\
'<p>version 1.0.42</p>\n'\
'<p>The quick brown fox jumped over the lazy dog&#8217;s back.</p>\n'


@ -1,16 +1,32 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import logging
import shutil
import os
import datetime
import time
from pelican import utils
from .support import get_article, unittest
from .support import get_article, LoggedTestCase
from pelican.utils import NoFilesError
class TestUtils(unittest.TestCase):
class TestUtils(LoggedTestCase):
_new_attribute = 'new_value'
@utils.deprecated_attribute(
old='_old_attribute', new='_new_attribute',
since=(3, 1, 0), remove=(4, 1, 3))
def _old_attribute(): return None
def test_deprecated_attribute(self):
value = self._old_attribute
self.assertEquals(value, self._new_attribute)
self.assertLogCountEqual(
count=1,
msg=('_old_attribute has been deprecated since 3.1.0 and will be '
'removed by version 4.1.3. Use _new_attribute instead'),
level=logging.WARNING)
def test_get_date(self):
# valid ones
@ -79,17 +95,17 @@ class TestUtils(unittest.TestCase):
"""Test if file changes are correctly detected
Make sure to handle not getting any files correctly"""
path = os.path.join(os.path.dirname(__file__), 'content')
filename = os.path.join(path, 'article_with_metadata.rst')
changed = utils.files_changed(path, 'rst')
dirname = os.path.join(os.path.dirname(__file__), 'content')
path = os.path.join(dirname, 'article_with_metadata.rst')
changed = utils.files_changed(dirname, 'rst')
self.assertEquals(changed, True)
changed = utils.files_changed(path, 'rst')
changed = utils.files_changed(dirname, 'rst')
self.assertEquals(changed, False)
t = time.time()
os.utime(filename, (t, t))
changed = utils.files_changed(path, 'rst')
os.utime(path, (t, t))
changed = utils.files_changed(dirname, 'rst')
self.assertEquals(changed, True)
self.assertAlmostEqual(utils.LAST_MTIME, t, delta=1)