#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals

import argparse
import logging
import os
import re
import subprocess
import sys
import time
from codecs import open

from six.moves.urllib.error import URLError
from six.moves.urllib.parse import urlparse
from six.moves.urllib.request import urlretrieve

# because logging.setLoggerClass has to be called before logging.getLogger
from pelican.log import init
from pelican.utils import SafeDatetime, slugify

try:
    from html import unescape  # py3.4+
except ImportError:
    from six.moves.html_parser import HTMLParser
    unescape = HTMLParser().unescape

logger = logging.getLogger(__name__)
def decode_wp_content(content, br=True):
    """Python port of WordPress' ``wpautop()`` filter.

    Turns the double-newline-delimited text WordPress stores into
    ``<p>``-wrapped HTML. ``<pre>`` sections are swapped out for
    placeholder tags first so their whitespace survives untouched, and
    restored at the end. When ``br`` is true, remaining single newlines
    become ``<br />`` tags (except inside ``<script>``/``<style>``).
    """
    pre_tags = {}
    if content.strip() == "":
        return ""

    content += "\n"
    if "<pre" in content:
        # Protect <pre> blocks: replace each with a unique placeholder
        # tag and remember the original text in pre_tags.
        pre_parts = content.split("</pre>")
        last_pre = pre_parts.pop()
        content = ""
        pre_index = 0
        for pre_part in pre_parts:
            start = pre_part.find("<pre")
            if start == -1:
                content = content + pre_part
                continue
            name = "<pre wp-pre-tag-{0}></pre>".format(pre_index)
            pre_tags[name] = pre_part[start:] + "</pre>"
            content = content + pre_part[0:start] + name
            pre_index += 1
        content = content + last_pre

    content = re.sub(r'<br />\s*<br />', "\n\n", content)
    allblocks = ('(?:table|thead|tfoot|caption|col|colgroup|tbody|tr|'
                 'td|th|div|dl|dd|dt|ul|ol|li|pre|select|option|form|'
                 'map|area|blockquote|address|math|style|p|h[1-6]|hr|'
                 'fieldset|noscript|samp|legend|section|article|aside|'
                 'hgroup|header|footer|nav|figure|figcaption|details|'
                 'menu|summary)')
    # put every block-level tag on its own "paragraph"
    content = re.sub(r'(<' + allblocks + r'[^>]*>)', "\n\\1", content)
    content = re.sub(r'(</' + allblocks + r'>)', "\\1\n\n", content)
    # content = content.replace("\r\n", "\n")
    if "<object" in content:
        # no <p> inside object/embed
        content = re.sub(r'\s*<param([^>]*)>\s*', "<param\\1>", content)
        content = re.sub(r'\s*</embed>\s*', '</embed>', content)
        # content = re.sub(r'/\n\n+/', '\n\n', content)

    # wrap each double-newline-separated chunk in <p>...</p>
    pgraphs = filter(lambda s: s != "", re.split(r'\n\s*\n', content))
    content = ""
    for p in pgraphs:
        content = content + "<p>" + p.strip() + "</p>\n"

    # under certain strange conditions it could create
    # a P of entirely whitespace
    content = re.sub(r'<p>\s*</p>', '', content)
    content = re.sub(
        r'<p>([^<]+)</(div|address|form)>',
        "<p>\\1</p></\\2>",
        content)
    # don't wrap block-level tags in <p>
    content = re.sub(
        r'<p>\s*(</?' + allblocks + r'[^>]*>)\s*</p>',
        "\\1",
        content)
    # problem with nested lists
    content = re.sub(r'<p>(<li.*)</p>', "\\1", content)
    content = re.sub(r'<p><blockquote([^>]*)>', "<blockquote\\1><p>", content)
    content = content.replace('</blockquote></p>', '</p></blockquote>')
    content = re.sub(r'<p>\s*(</?' + allblocks + r'[^>]*>)', "\\1", content)
    content = re.sub(r'(</?' + allblocks + r'[^>]*>)\s*</p>', "\\1", content)

    if br:
        def _preserve_newline(match):
            return match.group(0).replace("\n", "<WPPreserveNewline />")

        # FIX: the original carried the PHP pattern literally
        # ('/<(script|style).*?<\/\\1>/s'), which can never match in
        # Python, so newlines inside <script>/<style> were not protected.
        content = re.sub(
            r'<(script|style).*?</\1>',
            _preserve_newline,
            content,
            flags=re.DOTALL)
        # optionally make line breaks
        content = re.sub(r'(?<!<br />)\s*\n', "<br />\n", content)
        content = content.replace("<WPPreserveNewline />", "\n")
        # no <br /> right after/before a block-level tag
        content = re.sub(
            r'(</?' + allblocks + r'[^>]*>)\s*<br />', "\\1",
            content)
        content = re.sub(
            r'<br />(\s*</?(?:p|li|div|dl|dd|dt|th|pre|td|ul|ol)[^>]*>)',
            '\\1',
            content)
        content = re.sub(r'\n</p>', "</p>", content)

    if pre_tags:
        # restore the protected <pre> blocks in a single pass
        def _multi_replace(dic, string):
            pattern = r'|'.join(map(re.escape, dic.keys()))
            return re.sub(pattern, lambda m: dic[m.group()], string)
        content = _multi_replace(pre_tags, content)

    return content
def get_items(xml):
    """Opens a WordPress xml file and returns a list of items"""
    try:
        from bs4 import BeautifulSoup
    except ImportError:
        error = ('Missing dependency "BeautifulSoup4" and "lxml" required to '
                 'import WordPress XML files.')
        sys.exit(error)
    with open(xml, encoding='utf-8') as infile:
        xmlfile = infile.read()
    soup = BeautifulSoup(xmlfile, "xml")
    items = soup.rss.channel.findAll('item')
    return items
def get_filename(filename, post_id):
    """Return *filename* if it is set, otherwise fall back to *post_id*."""
    return filename if filename is not None else post_id
def wp2fields(xml, wp_custpost=False):
    """Opens a wordpress XML file, and yield Pelican fields"""
    items = get_items(xml)
    for item in items:
        if item.find('status').string in ["publish", "draft"]:
            try:
                # Use HTMLParser due to issues with BeautifulSoup 3
                title = unescape(item.title.contents[0])
            except IndexError:
                title = 'No title [%s]' % item.find('post_name').string
                logger.warning('Post "%s" is lacking a proper title', title)

            filename = item.find('post_name').string
            post_id = item.find('post_id').string
            filename = get_filename(filename, post_id)

            content = item.find('encoded').string
            raw_date = item.find('post_date').string
            date_object = time.strptime(raw_date, '%Y-%m-%d %H:%M:%S')
            date = time.strftime('%Y-%m-%d %H:%M', date_object)
            author = item.find('creator').string

            categories = [cat.string for cat
                          in item.findAll('category', {'domain': 'category'})]

            tags = [tag.string for tag
                    in item.findAll('category', {'domain': 'post_tag'})]

            # To publish a post the status should be 'published'
            status = 'published' if item.find('status').string == "publish" \
                else item.find('status').string

            kind = 'article'
            post_type = item.find('post_type').string
            if post_type == 'page':
                kind = 'page'
            elif wp_custpost:
                if post_type == 'post':
                    pass
                # Old behaviour was to name everything not a page as an
                # article.Theoretically all attachments have status == inherit
                # so no attachments should be here. But this statement is to
                # maintain existing behaviour in case that doesn't hold true.
                elif post_type == 'attachment':
                    pass
                else:
                    kind = post_type

            yield (title, content, filename, date, author, categories,
                   tags, status, kind, 'wp-html')
def dc2fields(file):
    """Opens a Dotclear export file, and yield pelican fields"""
    try:
        from bs4 import BeautifulSoup
    except ImportError:
        error = ('Missing dependency '
                 '"BeautifulSoup4" and "lxml" required '
                 'to import Dotclear files.')
        sys.exit(error)

    in_cat = False
    in_post = False
    category_list = {}
    posts = []

    with open(file, 'r', encoding='utf-8') as f:
        for line in f:
            # remove final \n
            line = line[:-1]

            if line.startswith('[category'):
                in_cat = True
            elif line.startswith('[post'):
                in_post = True
            elif in_cat:
                fields = line.split('","')
                if not line:
                    in_cat = False
                else:
                    # remove 1st and last ""
                    fields[0] = fields[0][1:]
                    # fields[-1] = fields[-1][:-1]
                    category_list[fields[0]] = fields[2]
            elif in_post:
                if not line:
                    in_post = False
                    break
                else:
                    posts.append(line)

    print("%i posts read." % len(posts))

    for post in posts:
        fields = post.split('","')
        # post_id = fields[0][1:]
        # blog_id = fields[1]
        # user_id = fields[2]
        cat_id = fields[3]
        # post_dt = fields[4]
        # post_tz = fields[5]
        post_creadt = fields[6]
        # post_upddt = fields[7]
        # post_password = fields[8]
        # post_type = fields[9]
        post_format = fields[10]
        # post_url = fields[11]
        # post_lang = fields[12]
        post_title = fields[13]
        post_excerpt = fields[14]
        post_excerpt_xhtml = fields[15]
        post_content = fields[16]
        post_content_xhtml = fields[17]
        # post_notes = fields[18]
        # post_words = fields[19]
        # post_status = fields[20]
        # post_selected = fields[21]
        # post_position = fields[22]
        # post_open_comment = fields[23]
        # post_open_tb = fields[24]
        # nb_comment = fields[25]
        # nb_trackback = fields[26]
        post_meta = fields[27]
        # redirect_url = fields[28][:-1]

        # remove seconds
        post_creadt = ':'.join(post_creadt.split(':')[0:2])

        author = ''
        categories = []
        tags = []

        if cat_id:
            categories = [category_list[id].strip() for id
                          in cat_id.split(',')]

        # Get tags related to a post from the serialized post_meta blob
        tag = (post_meta.replace('{', '')
                        .replace('}', '')
                        .replace('a:1:s:3:\\"tag\\";a:', '')
                        .replace('a:0:', ''))
        if len(tag) > 1:
            if int(len(tag[:1])) == 1:
                newtag = tag.split('"')[1]
                tags.append(
                    BeautifulSoup(
                        newtag,
                        'xml'
                    )
                    # bs4 always outputs UTF-8
                    .decode('utf-8')
                )
            else:
                i = 1
                j = 1
                while (i <= int(tag[:1])):
                    newtag = tag.split('"')[j].replace('\\', '')
                    tags.append(
                        BeautifulSoup(
                            newtag,
                            'xml'
                        )
                        # bs4 always outputs UTF-8
                        .decode('utf-8')
                    )
                    i = i + 1
                    if j < int(tag[:1]) * 2:
                        j = j + 2

        """
        dotclear2 does not use markdown by default unless
        you use the markdown plugin
        Ref: http://plugins.dotaddict.org/dc2/details/formatting-markdown
        """
        if post_format == "markdown":
            content = post_excerpt + post_content
        else:
            content = post_excerpt_xhtml + post_content_xhtml
            content = content.replace('\\n', '')
            post_format = "html"

        kind = 'article'  # TODO: Recognise pages
        status = 'published'  # TODO: Find a way for draft posts

        yield (post_title, content, slugify(post_title), post_creadt, author,
               categories, tags, status, kind, post_format)
def posterous2fields(api_token, email, password):
    """Imports posterous posts"""
    import base64
    from datetime import timedelta
    try:
        # py3k import
        import json
    except ImportError:
        # py2 import
        import simplejson as json
    try:
        # py3k import
        import urllib.request as urllib_request
    except ImportError:
        # py2 import
        import urllib2 as urllib_request

    def get_posterous_posts(api_token, email, password, page=1):
        # FIX: base64.encodestring returned bytes on Python 3 (and was
        # removed in 3.9), so the original str .replace() raised
        # TypeError; b64encode + immediate decode works on both majors.
        base64string = base64.b64encode(
            ("%s:%s" % (email, password)).encode('utf-8')
        ).decode('ascii').replace('\n', '')
        url = ("http://posterous.com/api/v2/users/me/sites/primary/"
               "posts?api_token=%s&page=%d") % (api_token, page)
        request = urllib_request.Request(url)
        request.add_header('Authorization', 'Basic %s' % base64string)
        handle = urllib_request.urlopen(request)
        posts = json.loads(handle.read().decode('utf-8'))
        return posts

    page = 1
    posts = get_posterous_posts(api_token, email, password, page)
    while len(posts) > 0:
        posts = get_posterous_posts(api_token, email, password, page)
        page += 1

        for post in posts:
            slug = post.get('slug')
            if not slug:
                slug = slugify(post.get('title'))
            tags = [tag.get('name') for tag in post.get('tags')]
            raw_date = post.get('display_date')
            date_object = SafeDatetime.strptime(
                raw_date[:-6], '%Y/%m/%d %H:%M:%S')
            # display_date carries a trailing UTC offset like "-0800";
            # normalize the timestamp by subtracting it
            offset = int(raw_date[-5:])
            delta = timedelta(hours=(offset / 100))
            date_object -= delta
            date = date_object.strftime('%Y-%m-%d %H:%M')
            kind = 'article'  # TODO: Recognise pages
            status = 'published'  # TODO: Find a way for draft posts

            yield (post.get('title'), post.get('body_cleaned'),
                   slug, date, post.get('user').get('display_name'),
                   [], tags, status, kind, 'html')
def tumblr2fields(api_key, blogname):
    """ Imports Tumblr posts (API v2)"""
    from time import strftime, localtime
    try:
        # py3k import
        import json
    except ImportError:
        # py2 import
        import simplejson as json
    try:
        # py3k import
        import urllib.request as urllib_request
    except ImportError:
        # py2 import
        import urllib2 as urllib_request

    def get_tumblr_posts(api_key, blogname, offset=0):
        url = ("http://api.tumblr.com/v2/blog/%s.tumblr.com/"
               "posts?api_key=%s&offset=%d&filter=raw") % (
            blogname, api_key, offset)
        request = urllib_request.Request(url)
        handle = urllib_request.urlopen(request)
        posts = json.loads(handle.read().decode('utf-8'))
        return posts.get('response').get('posts')

    offset = 0
    posts = get_tumblr_posts(api_key, blogname, offset)
    while len(posts) > 0:
        for post in posts:
            title = \
                post.get('title') or \
                post.get('source_title') or \
                post.get('type').capitalize()
            slug = post.get('slug') or slugify(title)
            tags = post.get('tags')
            timestamp = post.get('timestamp')
            date = strftime("%Y-%m-%d %H:%M:%S", localtime(int(timestamp)))
            slug = strftime("%Y-%m-%d-", localtime(int(timestamp))) + slug
            format = post.get('format')
            content = post.get('body')
            type = post.get('type')
            if type == 'photo':
                if format == 'markdown':
                    fmtstr = '![%s](%s)'
                else:
                    fmtstr = '<img alt="%s" src="%s" />'
                content = ''
                for photo in post.get('photos'):
                    content += '\n'.join(
                        fmtstr % (photo.get('caption'),
                                  photo.get('original_size').get('url')))
                content += '\n\n' + post.get('caption')
            elif type == 'quote':
                if format == 'markdown':
                    fmtstr = '\n\n&mdash; %s'
                else:
                    fmtstr = '<p>&mdash; %s</p>'
                content = post.get('text') + fmtstr % post.get('source')
            elif type == 'link':
                if format == 'markdown':
                    fmtstr = '[via](%s)\n\n'
                else:
                    fmtstr = '<p><a href="%s">via</a></p>\n'
                content = fmtstr % post.get('url') + post.get('description')
            elif type == 'audio':
                if format == 'markdown':
                    fmtstr = '[via](%s)\n\n'
                else:
                    fmtstr = '<p><a href="%s">via</a></p>\n'
                content = fmtstr % post.get('source_url') + \
                    post.get('caption') + \
                    post.get('player')
            elif type == 'video':
                if format == 'markdown':
                    fmtstr = '[via](%s)\n\n'
                else:
                    fmtstr = '<p><a href="%s">via</a></p>\n'
                source = fmtstr % post.get('source_url')
                caption = post.get('caption')
                players = '\n'.join(player.get('embed_code')
                                    for player in post.get('player'))
                content = source + caption + players
            elif type == 'answer':
                title = post.get('question')
                content = ('<p>'
                           '<a href="%s" rel="external nofollow">%s</a>'
                           ': %s'
                           '</p>\n'
                           ' %s' % (post.get('asking_name'),
                                    post.get('asking_url'),
                                    post.get('question'),
                                    post.get('answer')))

            content = content.rstrip() + '\n'
            kind = 'article'
            status = 'published'  # TODO: Find a way for draft posts

            yield (title, content, slug, date, post.get('blog_name'), [type],
                   tags, status, kind, format)

        offset += len(posts)
        posts = get_tumblr_posts(api_key, blogname, offset)
def feed2fields(file):
    """Read a feed and yield pelican fields"""
    import feedparser
    d = feedparser.parse(file)
    for entry in d.entries:
        date = (time.strftime('%Y-%m-%d %H:%M', entry.updated_parsed)
                if hasattr(entry, 'updated_parsed') else None)
        author = entry.author if hasattr(entry, 'author') else None
        tags = ([e['term'] for e in entry.tags]
                if hasattr(entry, 'tags') else None)

        slug = slugify(entry.title)
        kind = 'article'
        yield (entry.title, entry.description, slug, date,
               author, [], tags, None, kind, 'html')
def build_header(title, date, author, categories, tags, slug,
                 status=None, attachments=None):
    """Build a reST-style header from a list of fields.

    Only truthy fields are emitted; the title underline uses
    docutils' column_width so wide (CJK) characters line up.
    """
    from docutils.utils import column_width

    header = '%s\n%s\n' % (title, '#' * column_width(title))
    if date:
        header += ':date: %s\n' % date
    if author:
        header += ':author: %s\n' % author
    if categories:
        header += ':category: %s\n' % ', '.join(categories)
    if tags:
        header += ':tags: %s\n' % ', '.join(tags)
    if slug:
        header += ':slug: %s\n' % slug
    if status:
        header += ':status: %s\n' % status
    if attachments:
        header += ':attachments: %s\n' % ', '.join(attachments)
    header += '\n'
    return header
def build_markdown_header(title, date, author, categories, tags,
                          slug, status=None, attachments=None):
    """Build a Markdown metadata header from a list of fields.

    Only truthy fields are emitted, each as a ``Key: value`` line,
    followed by a blank separator line.
    """
    header = 'Title: %s\n' % title
    if date:
        header += 'Date: %s\n' % date
    if author:
        header += 'Author: %s\n' % author
    if categories:
        header += 'Category: %s\n' % ', '.join(categories)
    if tags:
        header += 'Tags: %s\n' % ', '.join(tags)
    if slug:
        header += 'Slug: %s\n' % slug
    if status:
        header += 'Status: %s\n' % status
    if attachments:
        header += 'Attachments: %s\n' % ', '.join(attachments)
    header += '\n'
    return header
def get_ext(out_markup, in_markup='html'):
    """Pick the output file extension: .md if either side is markdown,
    otherwise .rst."""
    if in_markup == 'markdown' or out_markup == 'markdown':
        ext = '.md'
    else:
        ext = '.rst'
    return ext
def get_out_filename(output_path, filename, ext, kind,
                     dirpage, dircat, categories, wp_custpost):
    """Compute (and create directories for) the output file path for a
    post, honouring the --dir-page/--dir-cat/--wp-custpost layouts."""
    filename = os.path.basename(filename)

    # Enforce filename restrictions for various filesystems at once; see
    # http://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words
    # we do not need to filter words because an extension will be appended
    filename = re.sub(r'[<>:"/\\|?*^% ]', '-', filename)  # invalid chars
    filename = filename.lstrip('.')  # should not start with a dot
    if not filename:
        filename = '_'
    filename = filename[:249]  # allow for 5 extra characters

    out_filename = os.path.join(output_path, filename + ext)
    # option to put page posts in pages/ subdirectory
    if dirpage and kind == 'page':
        pages_dir = os.path.join(output_path, 'pages')
        if not os.path.isdir(pages_dir):
            os.mkdir(pages_dir)
        out_filename = os.path.join(pages_dir, filename + ext)
    elif not dirpage and kind == 'page':
        pass
    # option to put wp custom post types in directories with post type
    # names. Custom post types can also have categories so option to
    # create subdirectories with category names
    elif kind != 'article':
        if wp_custpost:
            typename = slugify(kind)
        else:
            typename = ''
            kind = 'article'
        if dircat and (len(categories) > 0):
            catname = slugify(categories[0])
        else:
            catname = ''
        out_filename = os.path.join(output_path, typename,
                                    catname, filename + ext)
        if not os.path.isdir(os.path.join(output_path, typename, catname)):
            os.makedirs(os.path.join(output_path, typename, catname))
    # option to put files in directories with categories names
    elif dircat and (len(categories) > 0):
        catname = slugify(categories[0])
        out_filename = os.path.join(output_path, catname, filename + ext)
        if not os.path.isdir(os.path.join(output_path, catname)):
            os.mkdir(os.path.join(output_path, catname))
    return out_filename
def get_attachments(xml):
    """returns a dictionary of posts that have attachments with a list
    of the attachment_urls
    """
    items = get_items(xml)
    names = {}
    attachments = []

    for item in items:
        kind = item.find('post_type').string
        filename = item.find('post_name').string
        post_id = item.find('post_id').string

        if kind == 'attachment':
            attachments.append((item.find('post_parent').string,
                                item.find('attachment_url').string))
        else:
            filename = get_filename(filename, post_id)
            names[post_id] = filename

    attachedposts = {}
    for parent, url in attachments:
        try:
            parent_name = names[parent]
        except KeyError:
            # attachment's parent is not a valid post
            parent_name = None
        # group urls under their parent post's filename (or None)
        attachedposts.setdefault(parent_name, []).append(url)
    return attachedposts
def download_attachments(output_path, urls):
    """Downloads WordPress attachments and returns a list of paths to
    attachments that can be associated with a post (relative path to output
    directory). Files that fail to download, will not be added to posts"""
    locations = []
    for url in urls:
        path = urlparse(url).path
        # teardown path and rebuild to negate any errors with
        # os.path.join and leading /'s
        path = path.split('/')
        filename = path.pop(-1)
        localpath = ''
        for item in path:
            # drive-letter-like segments would corrupt the path on Windows
            if sys.platform != 'win32' or ':' not in item:
                localpath = os.path.join(localpath, item)
        full_path = os.path.join(output_path, localpath)
        if not os.path.exists(full_path):
            os.makedirs(full_path)
        print('downloading {}'.format(filename))
        try:
            urlretrieve(url, os.path.join(full_path, filename))
            locations.append(os.path.join(localpath, filename))
        except (URLError, IOError) as e:
            # Python 2.7 throws an IOError rather Than URLError
            logger.warning("No file could be downloaded from %s\n%s", url, e)
    return locations
def fields2pelican(
        fields, out_markup, output_path,
        dircat=False, strip_raw=False, disable_slugs=False,
        dirpage=False, filename_template=None, filter_author=None,
        wp_custpost=False, wp_attach=False, attachments=None):
    """Write each imported field tuple out as a Pelican source file,
    converting HTML content to the target markup via pandoc."""
    for (title, content, filename, date, author, categories, tags, status,
            kind, in_markup) in fields:
        if filter_author and filter_author != author:
            continue
        slug = not disable_slugs and filename or None

        if wp_attach and attachments:
            try:
                urls = attachments[filename]
                attached_files = download_attachments(output_path, urls)
            except KeyError:
                attached_files = None
        else:
            attached_files = None

        ext = get_ext(out_markup, in_markup)
        if ext == '.md':
            header = build_markdown_header(
                title, date, author, categories, tags, slug,
                status, attached_files)
        else:
            out_markup = 'rst'
            header = build_header(title, date, author, categories,
                                  tags, slug, status, attached_files)

        out_filename = get_out_filename(
            output_path, filename, ext, kind, dirpage, dircat,
            categories, wp_custpost)
        print(out_filename)

        if in_markup in ('html', 'wp-html'):
            html_filename = os.path.join(output_path, filename + '.html')

            with open(html_filename, 'w', encoding='utf-8') as fp:
                # Replace newlines with paragraphs wrapped with <p> so
                # HTML is valid before conversion
                if in_markup == 'wp-html':
                    new_content = decode_wp_content(content)
                else:
                    paragraphs = content.splitlines()
                    paragraphs = ['<p>{0}</p>'.format(p) for p in paragraphs]
                    new_content = ''.join(paragraphs)
                fp.write(new_content)

            parse_raw = '--parse-raw' if not strip_raw else ''
            # NOTE(review): shell=True with interpolated filenames is
            # shell-injection-prone; filenames are sanitized upstream by
            # get_out_filename, but a list-argv call would be safer.
            cmd = ('pandoc --normalize {0} --from=html'
                   ' --to={1} -o "{2}" "{3}"')
            cmd = cmd.format(parse_raw, out_markup,
                             out_filename, html_filename)

            try:
                rc = subprocess.call(cmd, shell=True)
                if rc < 0:
                    error = 'Child was terminated by signal %d' % -rc
                    exit(error)
                elif rc > 0:
                    error = 'Please, check your Pandoc installation.'
                    exit(error)
            except OSError as e:
                error = 'Pandoc execution failed: %s' % e
                exit(error)

            os.remove(html_filename)

            with open(out_filename, 'r', encoding='utf-8') as fs:
                content = fs.read()
                if out_markup == 'markdown':
                    # In markdown, to insert a <br />, end a line with two
                    # or more spaces & then a end-of-line
                    # FIX: the replacement must carry TWO trailing spaces
                    # for a Markdown hard break; the scraped copy had one.
                    content = content.replace('\\\n ', '  \n')
                    content = content.replace('\\\n', '  \n')

        with open(out_filename, 'w', encoding='utf-8') as fs:
            fs.write(header + content)

    if wp_attach and attachments and None in attachments:
        print("downloading attachments that don't have a parent post")
        urls = attachments[None]
        download_attachments(output_path, urls)
def main():
    """Command-line entry point: parse arguments, pick the importer for
    the requested input type, and convert everything to Pelican files."""
    parser = argparse.ArgumentParser(
        description="Transform feed, WordPress, Tumblr, Dotclear, or "
                    "Posterous files into reST (rst) or Markdown (md) files. "
                    "Be sure to have pandoc installed.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument(
        dest='input', help='The input file to read')
    parser.add_argument(
        '--wpfile', action='store_true', dest='wpfile',
        help='Wordpress XML export')
    parser.add_argument(
        '--dotclear', action='store_true', dest='dotclear',
        help='Dotclear export')
    parser.add_argument(
        '--posterous', action='store_true', dest='posterous',
        help='Posterous export')
    parser.add_argument(
        '--tumblr', action='store_true', dest='tumblr',
        help='Tumblr export')
    parser.add_argument(
        '--feed', action='store_true', dest='feed',
        help='Feed to parse')
    parser.add_argument(
        '-o', '--output', dest='output', default='output',
        help='Output path')
    parser.add_argument(
        '-m', '--markup', dest='markup', default='rst',
        help='Output markup format (supports rst & markdown)')
    parser.add_argument(
        '--dir-cat', action='store_true', dest='dircat',
        help='Put files in directories with categories name')
    parser.add_argument(
        '--dir-page', action='store_true', dest='dirpage',
        help=('Put files recognised as pages in "pages/" sub-directory'
              ' (wordpress import only)'))
    parser.add_argument(
        '--filter-author', dest='author',
        help='Import only post from the specified author')
    parser.add_argument(
        '--strip-raw', action='store_true', dest='strip_raw',
        help="Strip raw HTML code that can't be converted to "
             "markup such as flash embeds or iframes (wordpress import only)")
    parser.add_argument(
        '--wp-custpost', action='store_true',
        dest='wp_custpost',
        help='Put wordpress custom post types in directories. If used with '
             '--dir-cat option directories will be created as '
             '/post_type/category/ (wordpress import only)')
    parser.add_argument(
        '--wp-attach', action='store_true', dest='wp_attach',
        help='(wordpress import only) Download files uploaded to wordpress as '
             'attachments. Files will be added to posts as a list in the post '
             'header. All files will be downloaded, even if '
             "they aren't associated with a post. Files with be downloaded "
             'with their original path inside the output directory. '
             'e.g. output/wp-uploads/date/postname/file.jpg '
             '-- Requires an internet connection --')
    parser.add_argument(
        '--disable-slugs', action='store_true',
        dest='disable_slugs',
        help='Disable storing slugs from imported posts within output. '
             'With this disabled, your Pelican URLs may not be consistent '
             'with your original posts.')
    parser.add_argument(
        '-e', '--email', dest='email',
        help="Email address (posterous import only)")
    parser.add_argument(
        '-p', '--password', dest='password',
        help="Password (posterous import only)")
    parser.add_argument(
        '-b', '--blogname', dest='blogname',
        help="Blog name (Tumblr import only)")

    args = parser.parse_args()

    input_type = None
    if args.wpfile:
        input_type = 'wordpress'
    elif args.dotclear:
        input_type = 'dotclear'
    elif args.posterous:
        input_type = 'posterous'
    elif args.tumblr:
        input_type = 'tumblr'
    elif args.feed:
        input_type = 'feed'
    else:
        error = ('You must provide either --wpfile, --dotclear, '
                 '--posterous, --tumblr or --feed options')
        exit(error)

    if not os.path.exists(args.output):
        try:
            os.mkdir(args.output)
        except OSError:
            error = 'Unable to create the output folder: ' + args.output
            exit(error)

    if args.wp_attach and input_type != 'wordpress':
        error = ('You must be importing a wordpress xml '
                 'to use the --wp-attach option')
        exit(error)

    if input_type == 'wordpress':
        fields = wp2fields(args.input, args.wp_custpost or False)
    elif input_type == 'dotclear':
        fields = dc2fields(args.input)
    elif input_type == 'posterous':
        fields = posterous2fields(args.input, args.email, args.password)
    elif input_type == 'tumblr':
        fields = tumblr2fields(args.input, args.blogname)
    elif input_type == 'feed':
        fields = feed2fields(args.input)

    if args.wp_attach:
        attachments = get_attachments(args.input)
    else:
        attachments = None

    # init logging
    init()
    fields2pelican(fields, args.markup, args.output,
                   dircat=args.dircat or False,
                   dirpage=args.dirpage or False,
                   strip_raw=args.strip_raw or False,
                   disable_slugs=args.disable_slugs or False,
                   filter_author=args.author,
                   wp_custpost=args.wp_custpost or False,
                   wp_attach=args.wp_attach or False,
                   attachments=attachments or None)