This eases the creation of a new blog. I slightly modified the code written by @Skami18 here: https://gist.github.com/1025236/dfa695e67482477907c79ae709ab827b20b18b04 This commit also renames the import script to "pelican-import". Fixes #129.
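For reference, a typical run of the renamed script would look something like this (the export file and output directory names here are only examples):

    pelican-import --wpfile wordpress-export.xml -o content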
243 lines
8.1 KiB
Python
Executable file
#! /usr/bin/env python

from pelican.utils import slugify

from codecs import open
import os
import argparse
import time


def wp2fields(xml):
    """Opens a wordpress XML file and yields pelican fields"""
    from BeautifulSoup import BeautifulStoneSoup

    xmlfile = open(xml, encoding='utf-8').read()
    soup = BeautifulStoneSoup(xmlfile)
    items = soup.rss.channel.findAll('item')

    for item in items:
        if item.fetch('wp:status')[0].contents[0] == "publish":
            title = item.title.contents[0]
            content = item.fetch('content:encoded')[0].contents[0]
            filename = item.fetch('wp:post_name')[0].contents[0]

            raw_date = item.fetch('wp:post_date')[0].contents[0]
            date_object = time.strptime(raw_date, "%Y-%m-%d %H:%M:%S")
            date = time.strftime("%Y-%m-%d %H:%M", date_object)

            author = item.fetch('dc:creator')[0].contents[0].title()

            categories = [cat.contents[0] for cat in item.fetch(domain='category')]
            # caturl = [cat['nicename'] for cat in item.fetch(domain='category')]

            tags = [tag.contents[0].title() for tag in item.fetch(domain='tag', nicename=None)]

            yield (title, content, filename, date, author, categories, tags, "html")


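# NOTE: wp2fields() relies on the old BeautifulSoup 3 package (its
# BeautifulStoneSoup XML parser); it will not work with the newer bs4
# distribution.

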
def dc2fields(file):
    """Opens a Dotclear export file and yields pelican fields"""
    in_cat = False
    in_post = False
    category_list = {}
    posts = []

    with open(file, 'r', encoding='utf-8') as f:

        for line in f:
            # remove final \n
            line = line[:-1]

            if line.startswith('[category'):
                in_cat = True
            elif line.startswith('[post'):
                in_post = True
            elif in_cat:
                fields = line.split('","')
                if not line:
                    in_cat = False
                else:
                    # remove 1st and last ""
                    fields[0] = fields[0][1:]
                    # fields[-1] = fields[-1][:-1]
                    category_list[fields[0]] = fields[2]
            elif in_post:
                if not line:
                    in_post = False
                    break
                else:
                    posts.append(line)

    print "%i posts read." % len(posts)

    for post in posts:
        fields = post.split('","')

        # post_id = fields[0][1:]
        # blog_id = fields[1]
        # user_id = fields[2]
        cat_id = fields[3]
        # post_dt = fields[4]
        # post_tz = fields[5]
        post_creadt = fields[6]
        # post_upddt = fields[7]
        # post_password = fields[8]
        post_type = fields[9]
        post_format = fields[10]
        post_url = fields[11]
        post_lang = fields[12]
        post_title = fields[13]
        post_excerpt = fields[14]
        post_excerpt_xhtml = fields[15]
        post_content = fields[16]
        post_content_xhtml = fields[17]
        # post_notes = fields[18]
        # post_words = fields[19]
        # post_status = fields[20]
        # post_selected = fields[21]
        # post_position = fields[22]
        # post_open_comment = fields[23]
        # post_open_tb = fields[24]
        # nb_comment = fields[25]
        # nb_trackback = fields[26]
        # post_meta = fields[27]
        # redirect_url = fields[28][:-1]

        # remove seconds
        post_creadt = ':'.join(post_creadt.split(':')[0:2])

        author = ""
        categories = []
        tags = []

        if cat_id:
            categories = [category_list[id].strip() for id in cat_id.split(',')]

        if post_format == "markdown":
            content = post_excerpt + post_content
        else:
            content = post_excerpt_xhtml + post_content_xhtml
            content = content.replace('\\n', '')
            post_format = "html"

        yield (post_title, content, post_url, post_creadt, author, categories, tags, post_format)


def feed2fields(file):
    """Read a feed and yield pelican fields"""
    import feedparser
    d = feedparser.parse(file)
    for entry in d.entries:
        date = (time.strftime("%Y-%m-%d %H:%M", entry.updated_parsed)
                if hasattr(entry, "updated_parsed") else None)
        author = entry.author if hasattr(entry, "author") else None
        tags = [e['term'] for e in entry.tags] if hasattr(entry, "tags") else None

        slug = slugify(entry.title)
        yield (entry.title, entry.description, slug, date, author, [], tags, "html")


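# Each of the three converters above yields the same 8-item tuple, which is
# what fields2pelican() below consumes:
#   (title, content, filename, date, author, categories, tags, markup)

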
def build_header(title, date, author, categories, tags):
    """Build a header from a list of fields"""
    header = '%s\n%s\n' % (title, '#' * len(title))
    if date:
        header += ':date: %s\n' % date
    if categories:
        header += ':category: %s\n' % ', '.join(categories)
    if tags:
        header += ':tags: %s\n' % ', '.join(tags)
    header += '\n'
    return header


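# For illustration, build_header('A title', '2011-01-01 10:00', None,
# ['misc'], ['pelican']) returns a reStructuredText header of the form:
#
#   A title
#   #######
#   :date: 2011-01-01 10:00
#   :category: misc
#   :tags: pelican

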
def build_markdown_header(title, date, author, categories, tags):
    """Build a header from a list of fields"""
    header = 'Title: %s\n' % title
    if date:
        header += 'Date: %s\n' % date
    if categories:
        header += 'Category: %s\n' % ', '.join(categories)
    if tags:
        header += 'Tags: %s\n' % ', '.join(tags)
    header += '\n'
    return header


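# The same example call to build_markdown_header() would instead produce:
#
#   Title: A title
#   Date: 2011-01-01 10:00
#   Category: misc
#   Tags: pelican

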
def fields2pelican(fields, output_path, dircat=False):
    for title, content, filename, date, author, categories, tags, markup in fields:
        if markup == "markdown":
            ext = '.md'
            header = build_markdown_header(title, date, author, categories, tags)
        else:
            ext = '.rst'
            header = build_header(title, date, author, categories, tags)

        filename = os.path.basename(filename)

        # option to put files in directories with category names
        if dircat and (len(categories) == 1):
            catname = categories[0]
            out_filename = os.path.join(output_path, catname, filename + ext)
            if not os.path.isdir(os.path.join(output_path, catname)):
                os.mkdir(os.path.join(output_path, catname))
        else:
            out_filename = os.path.join(output_path, filename + ext)

        print out_filename

        if markup == "html":
            html_filename = os.path.join(output_path, filename + '.html')

            with open(html_filename, 'w', encoding='utf-8') as fp:
                fp.write(content)

            # convert the intermediate HTML file to reST with pandoc
            os.system('pandoc --normalize --reference-links --from=html --to=rst -o "%s" "%s"' % (
                out_filename, html_filename))

            os.remove(html_filename)

            with open(out_filename, 'r', encoding='utf-8') as fs:
                content = fs.read()

            with open(out_filename, 'w', encoding='utf-8') as fs:
                fs.write(header + content)
        else:
            # markdown content needs no conversion, write it out directly
            with open(out_filename, 'w', encoding='utf-8') as fs:
                fs.write(header + content)


def main(input_type, input, output_path, dircat=False):
    if input_type == 'wordpress':
        fields = wp2fields(input)
    elif input_type == 'dotclear':
        fields = dc2fields(input)
    elif input_type == 'feed':
        fields = feed2fields(input)

    fields2pelican(fields, output_path, dircat=dircat)


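# A minimal sketch of programmatic use (the file and directory names are
# hypothetical), equivalent to what main('wordpress', ...) does:
#
#   fields = wp2fields('wordpress-export.xml')
#   fields2pelican(fields, 'content')

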
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Transform feed, Wordpress or Dotclear files to rst files. "
                    "Be sure to have pandoc installed.")

    parser.add_argument(dest='input', help='The input file to read')
    parser.add_argument('--wpfile', action='store_true', dest='wpfile',
        help='Wordpress XML export')
    parser.add_argument('--dotclear', action='store_true', dest='dotclear',
        help='Dotclear export')
    parser.add_argument('--feed', action='store_true', dest='feed',
        help='Feed to parse')
    parser.add_argument('-o', '--output', dest='output', default='output',
        help='Output path')
    parser.add_argument('--dir-cat', action='store_true', dest='dircat',
        help='Put files in directories named after their category')
    args = parser.parse_args()

    input_type = None
    if args.wpfile:
        input_type = 'wordpress'
    elif args.dotclear:
        input_type = 'dotclear'
    elif args.feed:
        input_type = 'feed'
    else:
        print "You must provide one of the --wpfile, --dotclear or --feed options"
        exit()

    main(input_type, args.input, args.output, dircat=args.dircat)