migration fix, new html2text, export wip

parent 7ec763391b
commit 14fdfe71e5

migrate.py (489 changed lines)
migration/html2text/__init__.py (997 added lines, new file)

@@ -17,263 +17,275 @@ IMG_REGEX = r"\!\[(.*?)\]\((data\:image\/(png|jpeg|jpg);base64\,(.*?))\)"
 OLD_DATE = '2016-03-05 22:22:00.350000'


-def extract_images(article):
-    ''' extract b64 encoded images from markdown in article body '''
-    body = article['body']
-    images = []
-    matches = re.finditer(IMG_REGEX, body, re.IGNORECASE | re.MULTILINE)
-    for i, match in enumerate(matches, start=1):
-        ext = match.group(3)
-        link = '/static/upload/image-' + \
-            article['old_id'] + str(i) + '.' + ext
-        img = match.group(4)
-        if img not in images:
-            open('..' + link, 'wb').write(base64.b64decode(img))
-            images.append(img)
-        body = body.replace(match.group(2), link)
-        print(link)
-    article['body'] = body
-    return article
+if __name__ == '__main__':
+    import sys

+users_data = json.loads(open('migration/data/users.json').read())
+users_dict = { x['_id']: x for x in users_data } # by id
+print(str(len(users_data)) + ' users loaded')
+users_by_oid = {}
+users_by_slug = {}

-def users():
-    ''' migrating users first '''
-    print('migrating users...')
-    newdata = {}
-    data = json.loads(open('migration/data/users.json').read())
-    counter = 0
-    export_data = {}
-    for entry in data:
-        oid = entry['_id']
-        user = migrateUser(entry)
-        newdata[oid] = user
-        del user['password']
-        del user['notifications']
-        # del user['oauth']
-        del user['emailConfirmed']
-        del user['username']
-        del user['email']
-        export_data[user['slug']] = user
-        counter += 1
-    export_list = sorted(export_data.items(), key=lambda item: item[1]['rating'])[-10:]
-    open('migration/data/users.dict.json', 'w').write(json.dumps(newdata, cls=DateTimeEncoder)) # NOTE: by old_id
-    open('../src/data/authors.json', 'w').write(json.dumps(dict(export_list),
-        cls=DateTimeEncoder,
-        indent=4,
-        sort_keys=True,
-        ensure_ascii=False))
-    print(str(len(newdata.items())) + ' user accounts were migrated')
-    print(str(len(export_list)) + ' authors were exported')
+tags_data = json.loads(open('migration/data/tags.json').read())
+print(str(len(tags_data)) + ' tags loaded')


-def topics():
-    ''' topics from categories and tags '''
-    print('migrating topics...')
 cats_data = json.loads(open('migration/data/content_item_categories.json').read())
-    cat_topics = {}
-    slug_topics = {}
-    counter = 0
-    try:
-        for cat in cats_data:
-            topic = migrateCategory(cat)
-            cat_topics[topic['cat_id']] = topic
-            slug_topics[topic['slug']] = topic
-            counter += 1
-    except Exception as e:
-        print('cats exception, try to remove database first')
-        raise e
-    '''
-    try:
-        for tag in tag_data:
-            topic = migrateTag(tag)
-            newdata[topic['slug']] = topic
-            counter += 1
-    except Exception:
-        print('tags exception, try to remove database first')
-        raise Exception
-    '''
-    export_list = sorted(slug_topics.items(), key=lambda item: str(
-        item[1]['createdAt']))
-    open('migration/data/topics.dict.json','w').write(json.dumps(cat_topics,
-        cls=DateTimeEncoder,
-        indent=4,
-        sort_keys=True,
-        ensure_ascii=False))
-    open('../src/data/topics.json', 'w').write(json.dumps(dict(export_list),
-        cls=DateTimeEncoder,
-        indent=4,
-        sort_keys=True,
-        ensure_ascii=False))
-    #' tags and ' + str(len(tag_data)) +
-    print(str(counter) + ' / ' + str(len(cats_data)) + ' migrated')
-    print(str(len(export_list)) + ' topics were exported')
+print(str(len(cats_data)) + ' cats loaded')
+topics_by_cat = {}
+topics_by_tag = {}
+topics_by_slug = {}


-def shouts():
-    ''' migrating content items one by one '''
-    print('loading shouts...')
-    counter = 0
-    discours_author = 0
 content_data = json.loads(open('migration/data/content_items.json').read())
-content_dict = { x['_id']:x for x in content_data }
+content_dict = { x['_id']: x for x in content_data }
-    newdata = {}
-    print(str(len(content_data)) + ' entries loaded. now migrating...')
-    errored = []
-    for entry in content_data:
-        try:
-            shout = migrateShout(entry)
-            newdata[shout['slug']] = shout
-            author = newdata[shout['slug']]['authors'][0]['slug']
-            line = str(counter+1) + ': ' + shout['slug'] + " @" + str(author)
-            print(line)
-            counter += 1
-            if author == 'discours':
-                discours_author += 1
-            open('./shouts.id.log', 'a').write(line + '\n')
-        except Exception as e:
-            print(entry['_id'])
-            errored.append(entry)
-            raise e
-    try:
-        limit = int(sys.argv[2]) if len(sys.argv) > 2 else len(content_data)
-    except ValueError:
-        limit = len(content_data)
-    open('migration/data/shouts.dict.json',
-        'w').write(json.dumps(newdata, cls=DateTimeEncoder))
-    print(str(counter) + '/' + str(len(content_data)) +
-        ' content items were migrated')
-    print(str(discours_author) + ' from them by @discours')


-def comments():
-    ''' migrating comments on content items one by one '''
-    content_data = json.loads(open('migration/data/content_items.json').read()) # old content
-    content_dict = { x['_id']: x for x in content_data } # by slug
-    shouts_dict = json.loads(open('migration/data/shouts.dict.json', 'r').read()) # all shouts by slug
-    print(str(len(shouts_dict.keys())) + ' migrated shouts loaded')
-    shouts_old = { x['old_id']: x for slug, x in shouts_dict.items() } # shouts by old_id
 print(str(len(content_data)) + ' content items loaded')
-    comments_data = json.loads(open('migration/data/comments.json').read()) # by slug
+shouts_by_slug = {}
+shouts_by_oid = {}

+comments_data = json.loads(open('migration/data/comments.json').read())
 print(str(len(comments_data)) + ' comments loaded')
 comments_by_post = {}

 # sort comments by old posts ids
 for old_comment in comments_data:
     cid = old_comment['contentItem']
     comments_by_post[cid] = comments_by_post.get(cid, [])
     comments_by_post[cid].append(old_comment)
-    # migrate comments
-    comments_by_shoutslug = {}
-    for content_item in content_data:
-        old_id = content_item['_id']
-        if content_item.get('commentedAt', False):
-            comments = [ migrateComment(c) for c in comments_by_post.get(old_id, []) ]
-            if comments.length > 0:
-                shout = shouts_old.get(old_id, { 'slug': 'abandoned-comments' })
-                comments_by_shoutslug[shout['slug']] = comments
-    export_articles = json.loads(open('../src/data/articles.json').read())
-    print(str(len(export_articles.items())) + ' articles were exported')
-    export_comments = {}
-    c = 0
-    for slug, article in export_articles.items():
-        comments = comments_by_shoutslug.get(slug, [])
-        if len(comments) > 0:
-            export_comments[slug] = comments
-            c += len(comments)
-    print(str(len(export_comments.items())) + ' after adding those having comments')
-    open('../src/data/comments.json', 'w').write(json.dumps(dict(export_comments),
+print(str(len(comments_by_post.keys())) + ' articles with comments')
+export_articles = {} # slug: shout
+export_authors = {} # slug: user
+export_comments = {} # shout-slug: comment[] (list)
+export_topics = {} # slug: topic

+def extract_images(article):
+    ''' extract b64 encoded images from markdown in article body '''
+    body = article['body']
+    images = []
+    matches = re.finditer(IMG_REGEX, body, re.IGNORECASE | re.MULTILINE)
+    for i, match in enumerate(matches, start=1):
+        ext = match.group(3)
+        link = '/static/upload/image-' + \
+            article['old_id'] + str(i) + '.' + ext
+        img = match.group(4)
+        if img not in images:
+            open('..' + link, 'wb').write(base64.b64decode(img))
+            images.append(img)
+        body = body.replace(match.group(2), link)
+        print(link)
+    article['body'] = body
+    return article

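A note on the regex above: in IMG_REGEX, group(2) is the whole data: URI, group(3) the image extension and group(4) the base64 payload, which is exactly how extract_images rewrites the links. A self-contained sketch with an invented one-pixel payload:

    import base64
    import re

    IMG_REGEX = r"\!\[(.*?)\]\((data\:image\/(png|jpeg|jpg);base64\,(.*?))\)"

    body = '![pic](data:image/png;base64,aGk=)'  # invented sample markdown
    match = re.search(IMG_REGEX, body, re.IGNORECASE)
    ext = match.group(3)                         # 'png'
    payload = base64.b64decode(match.group(4))   # b'hi' -- the bytes written under /static/upload/
    body = body.replace(match.group(2), '/static/upload/image-oid1.' + ext)
    print(body)  # ![pic](/static/upload/image-oid1.png)
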
+def users():
+    ''' migrating users first '''
+    # limiting
+    limit = len(users_data)
+    if len(sys.argv) > 2: limit = int(sys.argv[2])
+    print('migrating %d users...' % limit)
+    counter = 0
+    for entry in users_data:
+        oid = entry['_id']
+        user = migrateUser(entry)
+        users_by_oid[oid] = user # full
+        del user['password']
+        del user['notifications']
+        # del user['oauth']
+        del user['emailConfirmed']
+        del user['username']
+        del user['email']
+        users_by_slug[user['slug']] = user # public
+        counter += 1
+    export_authors = dict(sorted(users_by_slug.items(), key=lambda item: item[1]['rating'])[-10:])
+    open('migration/data/users.old_id.json', 'w').write(json.dumps(users_by_oid, cls=DateTimeEncoder)) # NOTE: by old_id
+    open('migration/data/users.slug.json', 'w').write(json.dumps(users_by_slug, cls=DateTimeEncoder)) # NOTE: by old_id
+    print(str(len(users_by_slug.items())) + ' users migrated')

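The export_authors line above keeps only the ten highest-rated public profiles; the same sorted(...)[-10:] idiom in isolation, on toy data:

    users_by_slug = {'u%d' % i: {'slug': 'u%d' % i, 'rating': i} for i in range(15)}
    top10 = dict(sorted(users_by_slug.items(), key=lambda item: item[1]['rating'])[-10:])
    print(list(top10))  # the ten best-rated slugs, lowest of them first
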
+def topics():
+    ''' topics from categories and tags '''
+    # limiting
+    limit = len(cats_data) + len(tags_data)
+    if len(sys.argv) > 2: limit = int(sys.argv[2])
+    print('migrating %d topics...' % limit)
+    counter = 0
+    for cat in cats_data:
+        try: topic = migrateCategory(cat)
+        except Exception as e: raise e
+        topics_by_cat[topic['cat_id']] = topic
+        topics_by_slug[topic['slug']] = topic
+        counter += 1
+    for tag in tags_data:
+        topic = migrateTag(tag)
+        topics_by_tag[topic['tag_id']] = topic
+        if not topics_by_slug.get(topic['slug']): topics_by_slug[topic['slug']] = topic
+        counter += 1
+    export_topics = dict(sorted(topics_by_slug.items(), key=lambda item: str(item[1]['createdAt']))) # NOTE: sorting does not work :)
+    open('migration/data/topics.slug.json','w').write(json.dumps(topics_by_slug,
         cls=DateTimeEncoder,
         indent=4,
         sort_keys=True,
         ensure_ascii=False))
-    print(str(c) + ' comments were exported')

+    open('migration/data/topics.cat_id.json','w').write(json.dumps(topics_by_cat,

-def export_shouts(limit):
-    print('reading json...')
-    content_data = json.loads(open('migration/data/content_items.json').read())
-    content_dict = { x['_id']:x for x in content_data }
-    print(str(len(content_data)) + ' content items loaded')
-    newdata = json.loads(open('migration/data/shouts.dict.json', 'r').read())
-    print(str(len(newdata.keys())) + ' migrated shouts loaded')
-    users_old = json.loads(open('migration/data/users.dict.json').read())
-    print(str(len(newdata.keys())) + ' migrated users loaded')
-    export_authors = json.loads(open('../src/data/authors.json').read())
-    print(str(len(export_authors.items())) + ' exported authors loaded')
-    users_slug = { u['slug']: u for old_id, u in users_old.items()}
-    print(str(len(users_slug.items())) + ' users loaded')
-
-    export_list = [i for i in newdata.items() if i[1]['layout'] == 'article' and i[1]['published']]
-    export_list = sorted(export_list, key=lambda item: item[1]['createdAt'] or OLD_DATE, reverse=True)
-    print(str(len(export_list)) + ' filtered')
-
-    export_list = export_list[:limit or len(export_list)]
-    export_clean = {}
-    for (slug, article) in export_list:
-        if article['layout'] == 'article':
-            for author in article['authors']:
-                export_authors[author['slug']] = users_slug[author['slug']]
-            export_clean[article['slug']] = extract_images(article)
-            metadata = get_metadata(article)
-            content = frontmatter.dumps(frontmatter.Post(article['body'], **metadata))
-            open('../content/discours.io/'+slug+'.md', 'w').write(content)
-            # print(slug)
-            open('../content/discours.io/'+slug+'.html', 'w').write(content_dict[article['old_id']]['body'])
-    open('../src/data/articles.json', 'w').write(json.dumps(dict(export_clean),
         cls=DateTimeEncoder,
         indent=4,
         sort_keys=True,
         ensure_ascii=False))
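About the "sorting does not work" note above: dict(sorted(...)) does preserve insertion order on Python 3.7+, but json.dumps is called with sort_keys=True right below, which re-sorts the keys alphabetically and silently discards the createdAt ordering. A quick check:

    import json

    ordered = dict(sorted({'b': 2, 'a': 1}.items(), key=lambda kv: -kv[1]))
    print(list(ordered))                        # ['b', 'a'] -- insertion order survives
    print(json.dumps(ordered, sort_keys=True))  # {"a": 1, "b": 2} -- sort_keys undoes it
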
-    print(str(len(export_clean.items())) + ' articles exported')
-    open('../src/data/authors.json', 'w').write(json.dumps(export_authors,
-        cls=DateTimeEncoder,
-        indent=4,
-        sort_keys=True,
-        ensure_ascii=False))
-    comments()
-    print(str(len(export_authors.items())) + ' total authors exported')


-def export_slug(slug):
-    shouts_dict = json.loads(open('migration/data/shouts.dict.json').read())
-    print(str(len(shouts_dict.items())) + ' migrated shouts loaded')
-    users_old = json.loads(open('migration/data/users.dict.json').read()) # NOTE: this exact file is by old_id
-    print(str(len(users_old.items())) + ' migrated users loaded')
-    users_dict = { x[1]['slug']:x for x in users_old.items() }
-    exported_authors = json.loads(open('../src/data/authors.json').read())
-    print(str(len(exported_authors.items())) + ' exported authors loaded')
-    exported_articles = json.loads(open('../src/data/articles.json').read())
-    print(str(len(exported_articles.items())) + ' exported articles loaded')
-    shout = shouts_dict.get(slug, False)
-    if shout:
-        author = users_dict.get(shout['authors'][0]['slug'], None)
+def shouts():
+    ''' migrating content items one by one '''
+    # limiting
+    limit = len(content_data)
+    if len(sys.argv) > 2: limit = int(sys.argv[2])
+    print('migrating %d content items...' % limit)
+    counter = 0
+    discours_author = 0
+    errored = []
+    # limiting
+    try: limit = int(sys.argv[2]) if len(sys.argv) > 2 else len(content_data)
+    except ValueError: limit = len(content_data)

+    for entry in content_data[:limit]:
+        try:
+            shout = migrateShout(entry, users_by_oid, topics_by_cat)
+            author = shout['authors'][0]
+            shout['authors'] = [ author.id, ]
+            shouts_by_slug[shout['slug']] = shout
+            shouts_by_oid[entry['_id']] = shout
+            line = str(counter+1) + ': ' + shout['slug'] + " @" + str(author.slug)
+            counter += 1
+            if author.slug == 'discours': discours_author += 1
+            print(line)
+            # open('./shouts.id.log', 'a').write(line + '\n')
+        except Exception as e:
+            print(entry['_id'])
+            errored.append(entry)
+            raise e
+    open('migration/data/shouts.old_id.json','w').write(json.dumps(shouts_by_oid, cls=DateTimeEncoder))
+    open('migration/data/shouts.slug.json','w').write(json.dumps(shouts_by_slug, cls=DateTimeEncoder))
+    print(str(counter) + '/' + str(len(content_data)) + ' content items were migrated')
+    print(str(discours_author) + ' authored by @discours')

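Once shouts() has run, the two dumps index the same records under different keys; a small sketch of reading them back for a consistency check (assuming slugs are unique):

    import json

    shouts_by_oid = json.loads(open('migration/data/shouts.old_id.json').read())
    shouts_by_slug = json.loads(open('migration/data/shouts.slug.json').read())
    assert len(shouts_by_oid) == len(shouts_by_slug)  # holds only if no slug collisions
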
+def export_shouts(shouts_by_slug, export_articles, export_authors):
+    # update what was just migrated or load json again
+    if len(export_authors.keys()) == 0:
+        export_authors = json.loads(open('../src/data/authors.json').read())
+        print(str(len(export_authors.items())) + ' exported authors loaded')
+    if len(export_articles.keys()) == 0:
+        export_articles = json.loads(open('../src/data/articles.json').read())
+        print(str(len(export_articles.items())) + ' exported articles loaded')

+    # limiting
+    limit = 33
+    if len(sys.argv) > 2: limit = int(sys.argv[2])
+    print('exporting %d articles to json...' % limit)

+    # filter
+    export_list = [i for i in shouts_by_slug.items() if i[1]['layout'] == 'article']
+    export_list = sorted(export_list, key=lambda item: item[1]['createdAt'] or OLD_DATE, reverse=True)
+    print(str(len(export_list)) + ' filtered')
+    export_list = export_list[:limit or len(export_list)]

+    for (slug, article) in export_list:
+        if article['layout'] == 'article':
+            export_slug(slug, export_articles, export_authors)

+def export_body(article):
+    article = extract_images(article)
+    metadata = get_metadata(article)
+    content = frontmatter.dumps(frontmatter.Post(article['body'], **metadata))
+    open('../content/discours.io/'+slug+'.md', 'w').write(content)
+    open('../content/discours.io/'+slug+'.html', 'w').write(content_dict[article['old_id']]['body'])

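export_body leans on the python-frontmatter package; a minimal standalone example of the frontmatter.dumps(frontmatter.Post(...)) call it makes, with invented metadata:

    import frontmatter

    post = frontmatter.Post('body text in markdown', title='Example', slug='example')
    print(frontmatter.dumps(post))
    # ---
    # slug: example
    # title: Example
    # ---
    #
    # body text in markdown
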
+def export_slug(slug, export_articles, export_authors):
+    if exported_authors == {}:
+        exported_authors = json.loads(open('../src/data/authors.json').read())
+        print(str(len(exported_authors.items())) + ' exported authors loaded')
+    if exported_articles == {}:
+        exported_articles = json.loads(open('../src/data/articles.json').read())
+        print(str(len(exported_articles.items())) + ' exported articles loaded')

+    shout = shouts_by_slug.get(slug, False)
+    assert shout, 'no data error'
+    author = users_by_slug.get(shout['authors'][0]['slug'], None)
     exported_authors.update({shout['authors'][0]['slug']: author})
     exported_articles.update({shout['slug']: shout})
-    print(shout)
-    open('../src/data/articles.json', 'w').write(json.dumps(exported_articles,
+    export_body(shout)
+    comments([slug, ])


+def comments(sluglist = []):
+    ''' migrating comments on content items one '''
+    if len(sluglist) == 0:
+        export_articles = json.loads(open('../src/data/articles.json').read())
+        print(str(len(export_articles.items())) + ' articles were exported before')
+        if len(sluglist) == 0: sluglist = list(export_articles.keys())

+    if len(sluglist) > 0:
+        print('exporting comments for exact articles...')
+        for slug in sluglist:
+            shout = shouts_by_slug[slug]
+            old_id = shout['old_id']
+            content_item = content_dict.get(old_id, {})
+            if content_item.get('commentedAt', False):
+                comments = [ migrateComment(c) for c in comments_by_post.get(old_id, []) ]
+                if len(comments) > 0:
+                    export_comments[slug] = comments
+                    sys.stdout.write('.')
+    else:

+        print('exporting comments for top 10 commented articles...')
+        comments_by_shoutslug = {}
+        for content_item in content_data:
+            old_id = content_item['_id']
+            if content_item.get('commentedAt', False):
+                comments = [ migrateComment(c) for c in comments_by_post.get(old_id, []) ]
+                if len(comments) > 0:
+                    shout = shouts_by_oid.get(old_id, { 'slug': 'abandoned-comments' })
+                    comments_by_shoutslug[shout['slug']] = comments

+        top = dict(sorted(comments_by_shoutslug.items(), reverse=True, key=lambda c: len(c[1]))[:10])
+        export_comments.update(top)

+    print(str(len(export_comments.keys())) + ' articls with comments exported\n')

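The top-10 selection above orders slugs by comment count; the same idiom on toy data:

    comments_by_shoutslug = {'a': [1], 'b': [1, 2, 3], 'c': [1, 2]}
    top = dict(sorted(comments_by_shoutslug.items(), reverse=True, key=lambda c: len(c[1]))[:2])
    print(list(top))  # ['b', 'c'] -- most commented first
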
+def export_finish(export_articles = {}, export_authors = {}, export_topics = {}, export_comments = {}):
+    open('../src/data/authors.json', 'w').write(json.dumps(export_authors,
+        cls=DateTimeEncoder,
+        indent=4,
+        sort_keys=True,
+        ensure_ascii=False))
+    print(str(len(export_authors.items())) + ' authors exported')
+    open('../src/data/topics.json', 'w').write(json.dumps(export_topics,
         cls=DateTimeEncoder,
         indent=4,
         sort_keys=True,
         ensure_ascii=False))
-    open('../src/data/authors.json', 'w').write(json.dumps(exported_authors,
-        cls=DateTimeEncoder,
-        indent=4,
-        sort_keys=True,
-        ensure_ascii=False))
-    else:
-        print('no old id error!')
-    # print(str(len(shouts_dict)) + ' shouts were migrated')
-    print(slug)
-    comments()
-    print('finished.')
+    print(str(len(export_topics.keys())) + ' topics exported')

+    open('../src/data/articles.json', 'w').write(json.dumps(export_articles,
+        cls=DateTimeEncoder,
+        indent=4,
+        sort_keys=True,
+        ensure_ascii=False))
+    print(str(len(export_articles.items())) + ' articles exported')
+    open('../src/data/comments.json', 'w').write(json.dumps(export_comments,
+        cls=DateTimeEncoder,
+        indent=4,
+        sort_keys=True,
+        ensure_ascii=False))
+    print(str(len(export_comments.items())) + ' exported articles with comments')

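Every dump above passes cls=DateTimeEncoder; the encoder itself is imported elsewhere in the migration package and is not part of this diff. A plausible minimal sketch of what such an encoder looks like (an assumption -- the real one may differ):

    import json
    from datetime import datetime

    class DateTimeEncoder(json.JSONEncoder):
        # serialize datetime objects as ISO 8601 strings, defer the rest to the base class
        def default(self, obj):
            if isinstance(obj, datetime):
                return obj.isoformat()
            return super().default(obj)

    print(json.dumps({'createdAt': datetime(2016, 3, 5)}, cls=DateTimeEncoder))
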
-if __name__ == '__main__':
-    import sys
     if len(sys.argv) > 1:
-        if sys.argv[1] == "users":
-            users()
-        elif sys.argv[1] == "topics":
-            topics()
-        elif sys.argv[1] == "shouts":
+        cmd = sys.argv[1]
+        if cmd == "users":
+            users(users_by_oid, users_by_slug, users_data, users_dict)
+        elif cmd == "topics":
+            topics(topics_by_cat, topics_by_tag, topics_by_slug)
+        elif cmd == "shouts":
             try:
                 Community.create(**{
                     'slug': 'discours.io',

@@ -284,21 +296,30 @@ if __name__ == '__main__':
                 })
             except Exception:
                 pass
-            shouts()
-        elif sys.argv[1] == "comments":
+            shouts(shouts_by_slug, shouts_by_oid) # NOTE: listens limit
+        elif cmd == "comments":
             comments()
-        elif sys.argv[1] == "export_shouts":
-            limit = int(sys.argv[2]) if len(sys.argv) > 2 else None
-            export_shouts(limit)
-        elif sys.argv[1] == "all":
+        elif cmd == "export_shouts":
+            export_shouts(shouts_by_slug, export_articles, export_authors)
+        elif cmd == "all":
             users()
             topics()
             shouts()
             comments()
-        elif sys.argv[1] == "bson":
+        elif cmd == "bson":
             from migration import bson2json
             bson2json.json_tables()
-        elif sys.argv[1] == 'slug':
-            export_slug(sys.argv[2])
+        elif cmd == 'slug':
+            export_slug(sys.argv[2], export_articles, export_authors)
+        export_finish(export_articles, export_authors, export_topics, export_comments)
         else:
-            print('usage: python migrate.py bson\n.. \ttopics <limit>\n.. \tusers <limit>\n.. \tshouts <limit>\n.. \tcomments\n.. \texport_shouts <limit>\n.. \tslug <slug>\n.. \tall>')
+            print('''
+                usage: python migrate.py bson
+                \n.. \ttopics <limit>
+                \n.. \tusers <limit>
+                \n.. \tshouts <limit>
+                \n.. \tcomments
+                \n.. \texport_shouts <limit>
+                \n.. \tslug <slug>
+                \n.. \tall
+                ''')
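All the <limit> arguments flow through the same sys.argv pattern the dispatch relies on; in isolation (simulated argv):

    import sys

    sys.argv = ['migrate.py', 'users', '50']  # simulated command line
    limit = int(sys.argv[2]) if len(sys.argv) > 2 else None
    print(limit)  # 50
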

@@ -27,6 +27,11 @@ import optparse, re, sys, codecs, types
 try: from textwrap import wrap
 except: pass

+# support the python3 API
+if sys.version_info[0] == 3:
+    unichr = chr
+    xrange = range
+
 # Use Unicode characters instead of their ascii psuedo-replacements
 UNICODE_SNOB = 0

@@ -72,7 +77,7 @@ unifiable = {'rsquo':"'", 'lsquo':"'", 'rdquo':'"', 'ldquo':'"',
 'igrave':'i', 'iacute':'i', 'icirc':'i', 'iuml':'i',
 'ograve':'o', 'oacute':'o', 'ocirc':'o', 'otilde':'o', 'ouml':'o',
 'ugrave':'u', 'uacute':'u', 'ucirc':'u', 'uuml':'u',
-'lrm':'', 'rlm':''}
+'lrm':' ', 'rlm':' '}

 unifiable_n = {}

@@ -264,11 +269,7 @@ class HTML2Text(HTMLParser.HTMLParser):
         else:
             nbsp = u' '
         self.outtext = self.outtext.replace(u'&nbsp_place_holder;', nbsp)
-        self.outtext = self.outtext.replace('\n** **\n', '')
-        self.outtext = self.outtext.replace('\u200b', '')
-        self.outtext = self.outtext.replace('\x0a', ' ')
-        self.outtext = self.outtext.replace('\n\n', '\n')
-        self.outtext = self.outtext.replace('====', '')
         return self.outtext

     def handle_charref(self, c):

@@ -563,7 +564,7 @@ class HTML2Text(HTMLParser.HTMLParser):
             nest_count = self.google_nest_count(tag_style)
         else:
             nest_count = len(self.list)
-        self.o("  " * nest_count) #TODO: line up <ol><li>s > 9 correctly.
+        self.o("  " * int(nest_count)) #TODO: line up <ol><li>s > 9 correctly.
         if li['name'] == "ul": self.o(self.ul_item_mark + " ")
         elif li['name'] == "ol":
             li['num'] += 1
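The int() cast above matters under Python 3: google_nest_count divides a pixel margin by GOOGLE_LIST_INDENT, and true division yields a float, which cannot multiply a string. A short illustration:

    nest_count = 36 / 36  # 1.0 on Python 3, where / is true division
    try:
        "  " * nest_count
    except TypeError as e:
        print(e)  # can't multiply sequence by non-int of type 'float'
    print("  " * int(nest_count) + "* item")  # the cast restores the Python 2 behaviour
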

@@ -596,85 +597,86 @@ class HTML2Text(HTMLParser.HTMLParser):
         if self.abbr_data is not None:
             self.abbr_data += data

-        # if not self.quiet:
-        #     if self.google_doc:
+        if not self.quiet:
+            if self.google_doc:
                 # prevent white space immediately after 'begin emphasis' marks ('**' and '_')
                 lstripped_data = data.lstrip()
                 if self.drop_white_space and not (self.pre or self.code):
                     data = lstripped_data
-        if puredata: # and not self.pre:
+                if lstripped_data != '':
+                    self.drop_white_space = 0

+            if puredata and not self.pre:
                 data = re.sub('\s+', ' ', data)
                 if data and data[0] == ' ':
                     self.space = 1
                     data = data[1:]
-        if lstripped_data != '':
-            self.drop_white_space = 0
-
-        if not data and not force: return
+            if not data and not force: return

-        if self.startpre:
-            #self.out(" :") #TODO: not output when already one there
-            if not data.startswith("\n"): # <pre>stuff...
-                data = "\n" + data
+            if self.startpre:
+                #self.out(" :") #TODO: not output when already one there
+                if not data.startswith("\n"): # <pre>stuff...
+                    data = "\n" + data

-        bq = (">" * self.blockquote)
-        if not (force and data and data[0] == ">") and self.blockquote: bq += " "
+            bq = (">" * self.blockquote)
+            if not (force and data and data[0] == ">") and self.blockquote: bq += " "

-        if self.pre:
-            if not self.list:
-                bq += "    "
-            #else: list content is already partially indented
-            for i in range(len(self.list)):
-                bq += "    "
-            data = data.replace("\n", "\n"+bq)
+            if self.pre:
+                if not self.list:
+                    bq += "    "
+                #else: list content is already partially indented
+                for i in xrange(len(self.list)):
+                    bq += "    "
+                data = data.replace("\n", "\n"+bq)

-        if self.startpre:
-            self.startpre = 0
-            if self.list:
-                data = data.lstrip("\n") # use existing initial indentation
+            if self.startpre:
+                self.startpre = 0
+                if self.list:
+                    data = data.lstrip("\n") # use existing initial indentation

-        if self.start:
-            self.space = 0
-            self.p_p = 0
-            self.start = 0
+            if self.start:
+                self.space = 0
+                self.p_p = 0
+                self.start = 0

-        if force == 'end':
-            # It's the end.
-            self.p_p = 0
-            self.out("\n")
-            self.space = 0
+            if force == 'end':
+                # It's the end.
+                self.p_p = 0
+                self.out("\n")
+                self.space = 0

-        if self.p_p:
-            self.out((self.br_toggle+'\n'+bq)*self.p_p)
-            self.space = 0
-            self.br_toggle = ''
+            if self.p_p:
+                self.out((self.br_toggle+'\n'+bq)*self.p_p)
+                self.space = 0
+                self.br_toggle = ''

-        if self.space:
-            if not self.lastWasNL: self.out(' ')
-            self.space = 0
+            if self.space:
+                if not self.lastWasNL: self.out(' ')
+                self.space = 0

-        if self.a and ((self.p_p == 2 and self.links_each_paragraph) or force == "end"):
-            if force == "end": self.out("\n")
-
-            newa = []
-            for link in self.a:
-                if self.outcount > link['outcount']:
-                    self.out(" ["+ str(link['count']) +"]: " + urlparse.urljoin(self.baseurl, link['href']))
-                    if has_key(link, 'title'): self.out(" ("+link['title']+")")
-                    self.out("\n")
-                else:
-                    newa.append(link)
-
-            if self.a != newa: self.out("\n") # Don't need an extra line when nothing was done.
-
-            self.a = newa
-
-            if self.abbr_list and force == "end":
-                for abbr, definition in self.abbr_list.items():
-                    self.out(" *[" + abbr + "]: " + definition + "\n")
-        self.p_p = 0
-        self.out(data)
-        self.outcount += 1
+            if self.a and ((self.p_p == 2 and self.links_each_paragraph) or force == "end"):
+                if force == "end": self.out("\n")

+                newa = []
+                for link in self.a:
+                    if self.outcount > link['outcount']:
+                        self.out(" ["+ str(link['count']) +"]: " + urlparse.urljoin(self.baseurl, link['href']))
+                        if has_key(link, 'title'): self.out(" ("+link['title']+")")
+                        self.out("\n")
+                    else:
+                        newa.append(link)

+                if self.a != newa: self.out("\n") # Don't need an extra line when nothing was done.

+                self.a = newa

+                if self.abbr_list and force == "end":
+                    for abbr, definition in self.abbr_list.items():
+                        self.out(" *[" + abbr + "]: " + definition + "\n")

+            self.p_p = 0
+            self.out(data)
+            self.outcount += 1

     def handle_data(self, data):
         if r'\/script>' in data: self.quiet -= 1

@@ -789,8 +791,8 @@ md_dash_matcher = re.compile(r"""
 ^
 (\s*)
 (-)
 (?=\s|\-) # followed by whitespace (bullet list, or spaced out hr)
           # or another dash (header or hr)
 """, flags=re.MULTILINE | re.VERBOSE)
 slash_chars = r'\`*_{}[]()#+-.!'
 md_backslash_matcher = re.compile(r'''

@@ -895,7 +897,7 @@ def main():
     except ImportError:
         enc = lambda x, y: ('utf-8', 1)
     encoding = enc(j.headers, data)[0]
-    if encoding == 'us-ascii':
+    if encoding == 'en-ascii':
         encoding = 'utf-8'
     else:
         data = open(file_, 'rb').read()

migration/html2text/__init__.py (997 added lines, new file)

@@ -0,0 +1,997 @@
+"""html2text: Turn HTML into equivalent Markdown-structured text."""
+
+import html.entities
+import html.parser
+import re
+import string
+import urllib.parse as urlparse
+from textwrap import wrap
+from typing import Dict, List, Optional, Tuple, Union
+
+from . import config
+from .elements import AnchorElement, ListElement
+from .typing import OutCallback
+from .utils import (
+    dumb_css_parser,
+    element_style,
+    escape_md,
+    escape_md_section,
+    google_fixed_width_font,
+    google_has_height,
+    google_list_style,
+    google_text_emphasis,
+    hn,
+    list_numbering_start,
+    pad_tables_in_text,
+    skipwrap,
+    unifiable_n,
+)
+
+__version__ = (2020, 1, 16)
+
+
+# TODO:
+# Support decoded entities with UNIFIABLE.
+
+
+class HTML2Text(html.parser.HTMLParser):
+    def __init__(
+        self,
+        out: Optional[OutCallback] = None,
+        baseurl: str = "",
+        bodywidth: int = config.BODY_WIDTH,
+    ) -> None:
+        """
+        Input parameters:
+            out: possible custom replacement for self.outtextf (which
+                 appends lines of text).
+            baseurl: base URL of the document we process
+        """
+        super().__init__(convert_charrefs=False)
+
+        # Config options
+        self.split_next_td = False
+        self.td_count = 0
+        self.table_start = False
+        self.unicode_snob = config.UNICODE_SNOB  # covered in cli
+        self.escape_snob = config.ESCAPE_SNOB  # covered in cli
+        self.links_each_paragraph = config.LINKS_EACH_PARAGRAPH
+        self.body_width = bodywidth  # covered in cli
+        self.skip_internal_links = config.SKIP_INTERNAL_LINKS  # covered in cli
+        self.inline_links = config.INLINE_LINKS  # covered in cli
+        self.protect_links = config.PROTECT_LINKS  # covered in cli
+        self.google_list_indent = config.GOOGLE_LIST_INDENT  # covered in cli
+        self.ignore_links = config.IGNORE_ANCHORS  # covered in cli
+        self.ignore_mailto_links = config.IGNORE_MAILTO_LINKS  # covered in cli
+        self.ignore_images = config.IGNORE_IMAGES  # covered in cli
+        self.images_as_html = config.IMAGES_AS_HTML  # covered in cli
+        self.images_to_alt = config.IMAGES_TO_ALT  # covered in cli
+        self.images_with_size = config.IMAGES_WITH_SIZE  # covered in cli
+        self.ignore_emphasis = config.IGNORE_EMPHASIS  # covered in cli
+        self.bypass_tables = config.BYPASS_TABLES  # covered in cli
+        self.ignore_tables = config.IGNORE_TABLES  # covered in cli
+        self.google_doc = False  # covered in cli
+        self.ul_item_mark = "*"  # covered in cli
+        self.emphasis_mark = "_"  # covered in cli
+        self.strong_mark = "**"
+        self.single_line_break = config.SINGLE_LINE_BREAK  # covered in cli
+        self.use_automatic_links = config.USE_AUTOMATIC_LINKS  # covered in cli
+        self.hide_strikethrough = False  # covered in cli
+        self.mark_code = config.MARK_CODE
+        self.wrap_list_items = config.WRAP_LIST_ITEMS  # covered in cli
+        self.wrap_links = config.WRAP_LINKS  # covered in cli
+        self.wrap_tables = config.WRAP_TABLES
+        self.pad_tables = config.PAD_TABLES  # covered in cli
+        self.default_image_alt = config.DEFAULT_IMAGE_ALT  # covered in cli
+        self.tag_callback = None
+        self.open_quote = config.OPEN_QUOTE  # covered in cli
+        self.close_quote = config.CLOSE_QUOTE  # covered in cli
+
+        if out is None:
+            self.out = self.outtextf
+        else:
+            self.out = out
+
+        # empty list to store output characters before they are "joined"
+        self.outtextlist = []  # type: List[str]
+
+        self.quiet = 0
+        self.p_p = 0  # number of newline character to print before next output
+        self.outcount = 0
+        self.start = True
+        self.space = False
+        self.a = []  # type: List[AnchorElement]
+        self.astack = []  # type: List[Optional[Dict[str, Optional[str]]]]
+        self.maybe_automatic_link = None  # type: Optional[str]
+        self.empty_link = False
+        self.absolute_url_matcher = re.compile(r"^[a-zA-Z+]+://")
+        self.acount = 0
+        self.list = []  # type: List[ListElement]
+        self.blockquote = 0
+        self.pre = False
+        self.startpre = False
+        self.code = False
+        self.quote = False
+        self.br_toggle = ""
+        self.lastWasNL = False
+        self.lastWasList = False
+        self.style = 0
+        self.style_def = {}  # type: Dict[str, Dict[str, str]]
+        self.tag_stack = (
+            []
+        )  # type: List[Tuple[str, Dict[str, Optional[str]], Dict[str, str]]]
+        self.emphasis = 0
+        self.drop_white_space = 0
+        self.inheader = False
+        # Current abbreviation definition
+        self.abbr_title = None  # type: Optional[str]
+        # Last inner HTML (for abbr being defined)
+        self.abbr_data = None  # type: Optional[str]
+        # Stack of abbreviations to write later
+        self.abbr_list = {}  # type: Dict[str, str]
+        self.baseurl = baseurl
+        self.stressed = False
+        self.preceding_stressed = False
+        self.preceding_data = ""
+        self.current_tag = ""
+
+        config.UNIFIABLE["nbsp"] = "&nbsp_place_holder;"
+
+    def feed(self, data: str) -> None:
+        data = data.replace("</" + "script>", "</ignore>")
+        super().feed(data)
+
+    def handle(self, data: str) -> str:
+        self.feed(data)
+        self.feed("")
+        markdown = self.optwrap(self.finish())
+        if self.pad_tables:
+            return pad_tables_in_text(markdown)
+        else:
+            return markdown
+
+    def outtextf(self, s: str) -> None:
+        self.outtextlist.append(s)
+        if s:
+            self.lastWasNL = s[-1] == "\n"
+
+    def finish(self) -> str:
+        self.close()
+
+        self.pbr()
+        self.o("", force="end")
+
+        outtext = "".join(self.outtextlist)
+
+        if self.unicode_snob:
+            nbsp = html.entities.html5["nbsp;"]
+        else:
+            nbsp = " "
+        outtext = outtext.replace("&nbsp_place_holder;", nbsp)
+
+        # Clear self.outtextlist to avoid memory leak of its content to
+        # the next handling.
+        self.outtextlist = []
+
+        return outtext
+
+    def handle_charref(self, c: str) -> None:
+        self.handle_data(self.charref(c), True)
+
+    def handle_entityref(self, c: str) -> None:
+        ref = self.entityref(c)
+
+        # ref may be an empty string (e.g. for &lrm;/&rlm; markers that should
+        # not contribute to the final output).
+        # self.handle_data cannot handle a zero-length string right after a
+        # stressed tag or mid-text within a stressed tag (text get split and
+        # self.stressed/self.preceding_stressed gets switched after the first
+        # part of that text).
+        if ref:
+            self.handle_data(ref, True)
+
+    def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None:
+        self.handle_tag(tag, dict(attrs), start=True)
+
+    def handle_endtag(self, tag: str) -> None:
+        self.handle_tag(tag, {}, start=False)
+
+    def previousIndex(self, attrs: Dict[str, Optional[str]]) -> Optional[int]:
+        """
+        :type attrs: dict
+
+        :returns: The index of certain set of attributes (of a link) in the
+            self.a list. If the set of attributes is not found, returns None
+        :rtype: int
+        """
+        if "href" not in attrs:
+            return None
+
+        match = False
+        for i, a in enumerate(self.a):
+            if "href" in a.attrs and a.attrs["href"] == attrs["href"]:
+                if "title" in a.attrs or "title" in attrs:
+                    if (
+                        "title" in a.attrs
+                        and "title" in attrs
+                        and a.attrs["title"] == attrs["title"]
+                    ):
+                        match = True
+                else:
+                    match = True
+
+                if match:
+                    return i
+        return None
+
+    def handle_emphasis(
+        self, start: bool, tag_style: Dict[str, str], parent_style: Dict[str, str]
+    ) -> None:
+        """
+        Handles various text emphases
+        """
+        tag_emphasis = google_text_emphasis(tag_style)
+        parent_emphasis = google_text_emphasis(parent_style)
+
+        # handle Google's text emphasis
+        strikethrough = "line-through" in tag_emphasis and self.hide_strikethrough
+
+        # google and others may mark a font's weight as `bold` or `700`
+        bold = False
+        for bold_marker in config.BOLD_TEXT_STYLE_VALUES:
+            bold = bold_marker in tag_emphasis and bold_marker not in parent_emphasis
+            if bold:
+                break
+
+        italic = "italic" in tag_emphasis and "italic" not in parent_emphasis
+        fixed = (
+            google_fixed_width_font(tag_style)
+            and not google_fixed_width_font(parent_style)
+            and not self.pre
+        )
+
+        if start:
+            # crossed-out text must be handled before other attributes
+            # in order not to output qualifiers unnecessarily
+            if bold or italic or fixed:
+                self.emphasis += 1
+            if strikethrough:
+                self.quiet += 1
+            if italic:
+                self.o(self.emphasis_mark)
+                self.drop_white_space += 1
+            if bold:
+                self.o(self.strong_mark)
+                self.drop_white_space += 1
+            if fixed:
+                self.o("`")
+                self.drop_white_space += 1
+                self.code = True
+        else:
+            if bold or italic or fixed:
+                # there must not be whitespace before closing emphasis mark
+                self.emphasis -= 1
+                self.space = False
+            if fixed:
+                if self.drop_white_space:
+                    # empty emphasis, drop it
+                    self.drop_white_space -= 1
+                else:
+                    self.o("`")
+                self.code = False
+            if bold:
+                if self.drop_white_space:
+                    # empty emphasis, drop it
+                    self.drop_white_space -= 1
+                else:
+                    self.o(self.strong_mark)
+            if italic:
+                if self.drop_white_space:
+                    # empty emphasis, drop it
+                    self.drop_white_space -= 1
+                else:
+                    self.o(self.emphasis_mark)
+            # space is only allowed after *all* emphasis marks
+            if (bold or italic) and not self.emphasis:
+                self.o(" ")
+            if strikethrough:
+                self.quiet -= 1
+
+    def handle_tag(
+        self, tag: str, attrs: Dict[str, Optional[str]], start: bool
+    ) -> None:
+        self.current_tag = tag
+
+        if self.tag_callback is not None:
+            if self.tag_callback(self, tag, attrs, start) is True:
+                return
+
+        # first thing inside the anchor tag is another tag
+        # that produces some output
+        if (
+            start
+            and self.maybe_automatic_link is not None
+            and tag not in ["p", "div", "style", "dl", "dt"]
+            and (tag != "img" or self.ignore_images)
+        ):
+            self.o("[")
+            self.maybe_automatic_link = None
+            self.empty_link = False
+
+        if self.google_doc:
+            # the attrs parameter is empty for a closing tag. in addition, we
+            # need the attributes of the parent nodes in order to get a
+            # complete style description for the current element. we assume
+            # that google docs export well formed html.
+            parent_style = {}  # type: Dict[str, str]
+            if start:
+                if self.tag_stack:
+                    parent_style = self.tag_stack[-1][2]
+                tag_style = element_style(attrs, self.style_def, parent_style)
+                self.tag_stack.append((tag, attrs, tag_style))
+            else:
+                dummy, attrs, tag_style = (
+                    self.tag_stack.pop() if self.tag_stack else (None, {}, {})
+                )
+                if self.tag_stack:
+                    parent_style = self.tag_stack[-1][2]
+
+        if hn(tag):
+            # check if nh is inside of an 'a' tag (incorrect but found in the wild)
+            if self.astack:
+                if start:
+                    self.inheader = True
+                    # are inside link name, so only add '#' if it can appear before '['
+                    if self.outtextlist and self.outtextlist[-1] == "[":
+                        self.outtextlist.pop()
+                        self.space = False
+                        self.o(hn(tag) * "#" + " ")
+                        self.o("[")
+                else:
+                    self.p_p = 0  # don't break up link name
+                    self.inheader = False
+                    return  # prevent redundant emphasis marks on headers
+            else:
+                self.p()
+                if start:
+                    self.inheader = True
+                    self.o(hn(tag) * "#" + " ")
+                else:
+                    self.inheader = False
+                    return  # prevent redundant emphasis marks on headers
+
+        if tag in ["p", "div"]:
+            if self.google_doc:
+                if start and google_has_height(tag_style):
+                    self.p()
+                else:
+                    self.soft_br()
+            elif self.astack:
+                pass
+            else:
+                self.p()
+
+        if tag == "br" and start:
+            if self.blockquote > 0:
+                self.o("  \n> ")
+            else:
+                self.o("  \n")
+
+        if tag == "hr" and start:
+            self.p()
+            self.o("* * *")
+            self.p()
+
+        if tag in ["head", "style", "script"]:
+            if start:
+                self.quiet += 1
+            else:
+                self.quiet -= 1
+
+        if tag == "style":
+            if start:
+                self.style += 1
+            else:
+                self.style -= 1
+
+        if tag in ["body"]:
+            self.quiet = 0  # sites like 9rules.com never close <head>
+
+        if tag == "blockquote":
+            if start:
+                self.p()
+                self.o("> ", force=True)
+                self.start = True
+                self.blockquote += 1
+            else:
+                self.blockquote -= 1
+                self.p()
+
+        if tag in ["em", "i", "u"] and not self.ignore_emphasis:
+            # Separate with a space if we immediately follow an alphanumeric
+            # character, since otherwise Markdown won't render the emphasis
+            # marks, and we'll be left with eg 'foo_bar_' visible.
+            # (Don't add a space otherwise, though, since there isn't one in the
+            # original HTML.)
+            if (
+                start
+                and self.preceding_data
+                and self.preceding_data[-1] not in string.whitespace
+                and self.preceding_data[-1] not in string.punctuation
+            ):
+                emphasis = " " + self.emphasis_mark
+                self.preceding_data += " "
+            else:
+                emphasis = self.emphasis_mark
+
+            self.o(emphasis)
+            if start:
+                self.stressed = True
+
+        if tag in ["strong", "b"] and not self.ignore_emphasis:
+            # Separate with space if we immediately follow an * character, since
+            # without it, Markdown won't render the resulting *** correctly.
+            # (Don't add a space otherwise, though, since there isn't one in the
+            # original HTML.)
+            if (
+                start
+                and self.preceding_data
+                and self.preceding_data[-1] == self.strong_mark[0]
+            ):
+                strong = " " + self.strong_mark
+                self.preceding_data += " "
+            else:
+                strong = self.strong_mark
+
+            self.o(strong)
+            if start:
+                self.stressed = True
+
+        if tag in ["del", "strike", "s"]:
+            if start and self.preceding_data and self.preceding_data[-1] == "~":
+                strike = " ~~"
+                self.preceding_data += " "
+            else:
+                strike = "~~"
+
+            self.o(strike)
+            if start:
+                self.stressed = True
+
+        if self.google_doc:
+            if not self.inheader:
+                # handle some font attributes, but leave headers clean
+                self.handle_emphasis(start, tag_style, parent_style)
+
+        if tag in ["kbd", "code", "tt"] and not self.pre:
+            self.o("`")  # TODO: `` `this` ``
+            self.code = not self.code
+
+        if tag == "abbr":
+            if start:
+                self.abbr_title = None
+                self.abbr_data = ""
+                if "title" in attrs:
+                    self.abbr_title = attrs["title"]
+            else:
+                if self.abbr_title is not None:
+                    assert self.abbr_data is not None
+                    self.abbr_list[self.abbr_data] = self.abbr_title
+                    self.abbr_title = None
+                self.abbr_data = None
+
+        if tag == "q":
+            if not self.quote:
+                self.o(self.open_quote)
+            else:
+                self.o(self.close_quote)
+            self.quote = not self.quote
+
+        def link_url(self: HTML2Text, link: str, title: str = "") -> None:
+            url = urlparse.urljoin(self.baseurl, link)
+            title = ' "{}"'.format(title) if title.strip() else ""
+            self.o("]({url}{title})".format(url=escape_md(url), title=title))
+
+        if tag == "a" and not self.ignore_links:
+            if start:
+                if (
+                    "href" in attrs
+                    and attrs["href"] is not None
+                    and not (self.skip_internal_links and attrs["href"].startswith("#"))
+                    and not (
+                        self.ignore_mailto_links and attrs["href"].startswith("mailto:")
+                    )
+                ):
+                    self.astack.append(attrs)
+                    self.maybe_automatic_link = attrs["href"]
+                    self.empty_link = True
+                    if self.protect_links:
+                        attrs["href"] = "<" + attrs["href"] + ">"
+                else:
+                    self.astack.append(None)
+            else:
+                if self.astack:
+                    a = self.astack.pop()
+                    if self.maybe_automatic_link and not self.empty_link:
+                        self.maybe_automatic_link = None
+                    elif a:
+                        assert a["href"] is not None
+                        if self.empty_link:
+                            self.o("[")
+                            self.empty_link = False
+                            self.maybe_automatic_link = None
+                        if self.inline_links:
+                            self.p_p = 0
+                            title = a.get("title") or ""
+                            title = escape_md(title)
+                            link_url(self, a["href"], title)
+                        else:
+                            i = self.previousIndex(a)
+                            if i is not None:
+                                a_props = self.a[i]
+                            else:
+                                self.acount += 1
+                                a_props = AnchorElement(a, self.acount, self.outcount)
+                                self.a.append(a_props)
+                            self.o("][" + str(a_props.count) + "]")
+
+        if tag == "img" and start and not self.ignore_images:
+            if "src" in attrs:
+                assert attrs["src"] is not None
+                if not self.images_to_alt:
+                    attrs["href"] = attrs["src"]
+                alt = attrs.get("alt") or self.default_image_alt
+
+                # If we have images_with_size, write raw html including width,
+                # height, and alt attributes
+                if self.images_as_html or (
+                    self.images_with_size and ("width" in attrs or "height" in attrs)
+                ):
+                    self.o("<img src='" + attrs["src"] + "' ")
+                    if "width" in attrs:
+                        assert attrs["width"] is not None
+                        self.o("width='" + attrs["width"] + "' ")
+                    if "height" in attrs:
+                        assert attrs["height"] is not None
+                        self.o("height='" + attrs["height"] + "' ")
+                    if alt:
+                        self.o("alt='" + alt + "' ")
+                    self.o("/>")
+                    return
+
+                # If we have a link to create, output the start
+                if self.maybe_automatic_link is not None:
+                    href = self.maybe_automatic_link
+                    if (
+                        self.images_to_alt
+                        and escape_md(alt) == href
+                        and self.absolute_url_matcher.match(href)
+                    ):
+                        self.o("<" + escape_md(alt) + ">")
+                        self.empty_link = False
+                        return
+                    else:
+                        self.o("[")
+                        self.maybe_automatic_link = None
+                        self.empty_link = False
+
+                # If we have images_to_alt, we discard the image itself,
+                # considering only the alt text.
+                if self.images_to_alt:
+                    self.o(escape_md(alt))
+                else:
+                    self.o("![" + escape_md(alt) + "]")
+                    if self.inline_links:
+                        href = attrs.get("href") or ""
+                        self.o(
+                            "(" + escape_md(urlparse.urljoin(self.baseurl, href)) + ")"
+                        )
+                    else:
+                        i = self.previousIndex(attrs)
+                        if i is not None:
+                            a_props = self.a[i]
+                        else:
+                            self.acount += 1
+                            a_props = AnchorElement(attrs, self.acount, self.outcount)
+                            self.a.append(a_props)
+                        self.o("[" + str(a_props.count) + "]")
+
+        if tag == "dl" and start:
+            self.p()
+        if tag == "dt" and not start:
+            self.pbr()
+        if tag == "dd" and start:
+            self.o("    ")
+        if tag == "dd" and not start:
+            self.pbr()
+
+        if tag in ["ol", "ul"]:
+            # Google Docs create sub lists as top level lists
+            if not self.list and not self.lastWasList:
+                self.p()
+            if start:
+                if self.google_doc:
+                    list_style = google_list_style(tag_style)
+                else:
+                    list_style = tag
+                numbering_start = list_numbering_start(attrs)
+                self.list.append(ListElement(list_style, numbering_start))
+            else:
+                if self.list:
+                    self.list.pop()
+                    if not self.google_doc and not self.list:
+                        self.o("\n")
+            self.lastWasList = True
+        else:
+            self.lastWasList = False
+
+        if tag == "li":
+            self.pbr()
+            if start:
+                if self.list:
+                    li = self.list[-1]
+                else:
+                    li = ListElement("ul", 0)
+                if self.google_doc:
+                    self.o("  " * self.google_nest_count(tag_style))
+                else:
+                    # Indent two spaces per list, except use three spaces for an
+                    # unordered list inside an ordered list.
+                    # https://spec.commonmark.org/0.28/#motivation
+                    # TODO: line up <ol><li>s > 9 correctly.
+                    parent_list = None
+                    for list in self.list:
+                        self.o(
+                            "   " if parent_list == "ol" and list.name == "ul" else "  "
+                        )
+                        parent_list = list.name
+
+                if li.name == "ul":
+                    self.o(self.ul_item_mark + " ")
+                elif li.name == "ol":
+                    li.num += 1
+                    self.o(str(li.num) + ". ")
+                self.start = True
+
+        if tag in ["table", "tr", "td", "th"]:
+            if self.ignore_tables:
+                if tag == "tr":
+                    if start:
+                        pass
+                    else:
+                        self.soft_br()
+                else:
+                    pass
+
+            elif self.bypass_tables:
+                if start:
+                    self.soft_br()
+                if tag in ["td", "th"]:
+                    if start:
+                        self.o("<{}>\n\n".format(tag))
+                    else:
+                        self.o("\n</{}>".format(tag))
+                else:
+                    if start:
+                        self.o("<{}>".format(tag))
+                    else:
+                        self.o("</{}>".format(tag))
+
+            else:
+                if tag == "table":
+                    if start:
+                        self.table_start = True
+                        if self.pad_tables:
+                            self.o("<" + config.TABLE_MARKER_FOR_PAD + ">")
+                            self.o("  \n")
+                    else:
+                        if self.pad_tables:
+                            # add break in case the table is empty or its 1 row table
+                            self.soft_br()
+                            self.o("</" + config.TABLE_MARKER_FOR_PAD + ">")
+                            self.o("  \n")
+                if tag in ["td", "th"] and start:
+                    if self.split_next_td:
+                        self.o("| ")
+                    self.split_next_td = True
+
+                if tag == "tr" and start:
+                    self.td_count = 0
+                if tag == "tr" and not start:
+                    self.split_next_td = False
+                    self.soft_br()
+                if tag == "tr" and not start and self.table_start:
+                    # Underline table header
+                    self.o("|".join(["---"] * self.td_count))
+                    self.soft_br()
+                    self.table_start = False
+                if tag in ["td", "th"] and start:
+                    self.td_count += 1
+
+        if tag == "pre":
+            if start:
+                self.startpre = True
+                self.pre = True
+            else:
+                self.pre = False
+                if self.mark_code:
+                    self.out("\n[/code]")
+                self.p()
+
+    # TODO: Add docstring for these one letter functions
+    def pbr(self) -> None:
|
||||||
|
"Pretty print has a line break"
|
||||||
|
if self.p_p == 0:
|
||||||
|
self.p_p = 1
|
||||||
|
|
||||||
|
def p(self) -> None:
|
||||||
|
"Set pretty print to 1 or 2 lines"
|
||||||
|
self.p_p = 1 if self.single_line_break else 2
|
||||||
|
|
||||||
|
def soft_br(self) -> None:
|
||||||
|
"Soft breaks"
|
||||||
|
self.pbr()
|
||||||
|
self.br_toggle = " "
|
||||||
|
|
||||||
|
def o(
|
||||||
|
self, data: str, puredata: bool = False, force: Union[bool, str] = False
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
Deal with indentation and whitespace
|
||||||
|
"""
|
||||||
|
if self.abbr_data is not None:
|
||||||
|
self.abbr_data += data
|
||||||
|
|
||||||
|
if not self.quiet:
|
||||||
|
if self.google_doc:
|
||||||
|
# prevent white space immediately after 'begin emphasis'
|
||||||
|
# marks ('**' and '_')
|
||||||
|
lstripped_data = data.lstrip()
|
||||||
|
if self.drop_white_space and not (self.pre or self.code):
|
||||||
|
data = lstripped_data
|
||||||
|
if lstripped_data != "":
|
||||||
|
self.drop_white_space = 0
|
||||||
|
|
||||||
|
if puredata and not self.pre:
|
||||||
|
# This is a very dangerous call ... it could mess up
|
||||||
|
# all handling of when not handled properly
|
||||||
|
# (see entityref)
|
||||||
|
data = re.sub(r"\s+", r" ", data)
|
||||||
|
if data and data[0] == " ":
|
||||||
|
self.space = True
|
||||||
|
data = data[1:]
|
||||||
|
if not data and not force:
|
||||||
|
return
|
||||||
|
|
||||||
|
if self.startpre:
|
||||||
|
# self.out(" :") #TODO: not output when already one there
|
||||||
|
if not data.startswith("\n") and not data.startswith("\r\n"):
|
||||||
|
# <pre>stuff...
|
||||||
|
data = "\n" + data
|
||||||
|
if self.mark_code:
|
||||||
|
self.out("\n[code]")
|
||||||
|
self.p_p = 0
|
||||||
|
|
||||||
|
bq = ">" * self.blockquote
|
||||||
|
if not (force and data and data[0] == ">") and self.blockquote:
|
||||||
|
bq += " "
|
||||||
|
|
||||||
|
if self.pre:
|
||||||
|
if not self.list:
|
||||||
|
bq += " "
|
||||||
|
# else: list content is already partially indented
|
||||||
|
bq += " " * len(self.list)
|
||||||
|
data = data.replace("\n", "\n" + bq)
|
||||||
|
|
||||||
|
if self.startpre:
|
||||||
|
self.startpre = False
|
||||||
|
if self.list:
|
||||||
|
# use existing initial indentation
|
||||||
|
data = data.lstrip("\n")
|
||||||
|
|
||||||
|
if self.start:
|
||||||
|
self.space = False
|
||||||
|
self.p_p = 0
|
||||||
|
self.start = False
|
||||||
|
|
||||||
|
if force == "end":
|
||||||
|
# It's the end.
|
||||||
|
self.p_p = 0
|
||||||
|
self.out("\n")
|
||||||
|
self.space = False
|
||||||
|
|
||||||
|
if self.p_p:
|
||||||
|
self.out((self.br_toggle + "\n" + bq) * self.p_p)
|
||||||
|
self.space = False
|
||||||
|
self.br_toggle = ""
|
||||||
|
|
||||||
|
if self.space:
|
||||||
|
if not self.lastWasNL:
|
||||||
|
self.out(" ")
|
||||||
|
self.space = False
|
||||||
|
|
||||||
|
if self.a and (
|
||||||
|
(self.p_p == 2 and self.links_each_paragraph) or force == "end"
|
||||||
|
):
|
||||||
|
if force == "end":
|
||||||
|
self.out("\n")
|
||||||
|
|
||||||
|
newa = []
|
||||||
|
for link in self.a:
|
||||||
|
if self.outcount > link.outcount:
|
||||||
|
self.out(
|
||||||
|
" ["
|
||||||
|
+ str(link.count)
|
||||||
|
+ "]: "
|
||||||
|
+ urlparse.urljoin(self.baseurl, link.attrs["href"])
|
||||||
|
)
|
||||||
|
if "title" in link.attrs:
|
||||||
|
assert link.attrs["title"] is not None
|
||||||
|
self.out(" (" + link.attrs["title"] + ")")
|
||||||
|
self.out("\n")
|
||||||
|
else:
|
||||||
|
newa.append(link)
|
||||||
|
|
||||||
|
# Don't need an extra line when nothing was done.
|
||||||
|
if self.a != newa:
|
||||||
|
self.out("\n")
|
||||||
|
|
||||||
|
self.a = newa
|
||||||
|
|
||||||
|
if self.abbr_list and force == "end":
|
||||||
|
for abbr, definition in self.abbr_list.items():
|
||||||
|
self.out(" *[" + abbr + "]: " + definition + "\n")
|
||||||
|
|
||||||
|
self.p_p = 0
|
||||||
|
self.out(data)
|
||||||
|
self.outcount += 1
|
||||||
|
|
||||||
|
def handle_data(self, data: str, entity_char: bool = False) -> None:
|
||||||
|
if not data:
|
||||||
|
# Data may be empty for some HTML entities. For example,
|
||||||
|
# LEFT-TO-RIGHT MARK.
|
||||||
|
return
|
||||||
|
|
||||||
|
if self.stressed:
|
||||||
|
data = data.strip()
|
||||||
|
self.stressed = False
|
||||||
|
self.preceding_stressed = True
|
||||||
|
elif self.preceding_stressed:
|
||||||
|
if (
|
||||||
|
re.match(r"[^][(){}\s.!?]", data[0])
|
||||||
|
and not hn(self.current_tag)
|
||||||
|
and self.current_tag not in ["a", "code", "pre"]
|
||||||
|
):
|
||||||
|
# should match a letter or common punctuation
|
||||||
|
data = " " + data
|
||||||
|
self.preceding_stressed = False
|
||||||
|
|
||||||
|
if self.style:
|
||||||
|
self.style_def.update(dumb_css_parser(data))
|
||||||
|
|
||||||
|
if self.maybe_automatic_link is not None:
|
||||||
|
href = self.maybe_automatic_link
|
||||||
|
if (
|
||||||
|
href == data
|
||||||
|
and self.absolute_url_matcher.match(href)
|
||||||
|
and self.use_automatic_links
|
||||||
|
):
|
||||||
|
self.o("<" + data + ">")
|
||||||
|
self.empty_link = False
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
self.o("[")
|
||||||
|
self.maybe_automatic_link = None
|
||||||
|
self.empty_link = False
|
||||||
|
|
||||||
|
if not self.code and not self.pre and not entity_char:
|
||||||
|
data = escape_md_section(data, snob=self.escape_snob)
|
||||||
|
self.preceding_data = data
|
||||||
|
self.o(data, puredata=True)
|
||||||
|
|
||||||
|
def charref(self, name: str) -> str:
|
||||||
|
if name[0] in ["x", "X"]:
|
||||||
|
c = int(name[1:], 16)
|
||||||
|
else:
|
||||||
|
c = int(name)
|
||||||
|
|
||||||
|
if not self.unicode_snob and c in unifiable_n:
|
||||||
|
return unifiable_n[c]
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
return chr(c)
|
||||||
|
except ValueError: # invalid unicode
|
||||||
|
return ""
|
||||||
|
|
||||||
|
def entityref(self, c: str) -> str:
|
||||||
|
if not self.unicode_snob and c in config.UNIFIABLE:
|
||||||
|
return config.UNIFIABLE[c]
|
||||||
|
try:
|
||||||
|
ch = html.entities.html5[c + ";"]
|
||||||
|
except KeyError:
|
||||||
|
return "&" + c + ";"
|
||||||
|
return config.UNIFIABLE[c] if c == "nbsp" else ch
|
||||||
|
|
||||||
|
def google_nest_count(self, style: Dict[str, str]) -> int:
|
||||||
|
"""
|
||||||
|
Calculate the nesting count of google doc lists
|
||||||
|
|
||||||
|
:type style: dict
|
||||||
|
|
||||||
|
:rtype: int
|
||||||
|
"""
|
||||||
|
nest_count = 0
|
||||||
|
if "margin-left" in style:
|
||||||
|
nest_count = int(style["margin-left"][:-2]) // self.google_list_indent
|
||||||
|
|
||||||
|
return nest_count
|
||||||
|
|
||||||
|
def optwrap(self, text: str) -> str:
|
||||||
|
"""
|
||||||
|
Wrap all paragraphs in the provided text.
|
||||||
|
|
||||||
|
:type text: str
|
||||||
|
|
||||||
|
:rtype: str
|
||||||
|
"""
|
||||||
|
if not self.body_width:
|
||||||
|
return text
|
||||||
|
|
||||||
|
result = ""
|
||||||
|
newlines = 0
|
||||||
|
# I cannot think of a better solution for now.
|
||||||
|
# To avoid the non-wrap behaviour for entire paras
|
||||||
|
# because of the presence of a link in it
|
||||||
|
if not self.wrap_links:
|
||||||
|
self.inline_links = False
|
||||||
|
for para in text.split("\n"):
|
||||||
|
if len(para) > 0:
|
||||||
|
if not skipwrap(
|
||||||
|
para, self.wrap_links, self.wrap_list_items, self.wrap_tables
|
||||||
|
):
|
||||||
|
indent = ""
|
||||||
|
if para.startswith(" " + self.ul_item_mark):
|
||||||
|
# list item continuation: add a double indent to the
|
||||||
|
# new lines
|
||||||
|
indent = " "
|
||||||
|
elif para.startswith("> "):
|
||||||
|
# blockquote continuation: add the greater than symbol
|
||||||
|
# to the new lines
|
||||||
|
indent = "> "
|
||||||
|
wrapped = wrap(
|
||||||
|
para,
|
||||||
|
self.body_width,
|
||||||
|
break_long_words=False,
|
||||||
|
subsequent_indent=indent,
|
||||||
|
)
|
||||||
|
result += "\n".join(wrapped)
|
||||||
|
if para.endswith(" "):
|
||||||
|
result += " \n"
|
||||||
|
newlines = 1
|
||||||
|
elif indent:
|
||||||
|
result += "\n"
|
||||||
|
newlines = 1
|
||||||
|
else:
|
||||||
|
result += "\n\n"
|
||||||
|
newlines = 2
|
||||||
|
else:
|
||||||
|
# Warning for the tempted!!!
|
||||||
|
# Be aware that obvious replacement of this with
|
||||||
|
# line.isspace()
|
||||||
|
# DOES NOT work! Explanations are welcome.
|
||||||
|
if not config.RE_SPACE.match(para):
|
||||||
|
result += para + "\n"
|
||||||
|
newlines = 1
|
||||||
|
else:
|
||||||
|
if newlines < 2:
|
||||||
|
result += "\n"
|
||||||
|
newlines += 1
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
def html2text(html: str, baseurl: str = "", bodywidth: Optional[int] = None) -> str:
|
||||||
|
if bodywidth is None:
|
||||||
|
bodywidth = config.BODY_WIDTH
|
||||||
|
h = HTML2Text(baseurl=baseurl, bodywidth=bodywidth)
|
||||||
|
|
||||||
|
return h.handle(html)
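
A minimal usage sketch of the vendored converter (assumptions: the package is importable as migration.html2text, and the sample HTML strings are invented for illustration):

    from migration.html2text import HTML2Text, html2text

    # one-shot helper; wraps output at config.BODY_WIDTH by default
    print(html2text("<p>Hello, <a href='https://discours.io'>Discours</a>!</p>"))

    # or configure an instance explicitly, e.g. to disable line wrapping
    h = HTML2Text(bodywidth=0)
    print(h.handle("<h1>Title</h1><p>Body</p>"))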
3
migration/html2text/__main__.py
Normal file
@@ -0,0 +1,3 @@
from .cli import main

main()
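
With this entry point the package can presumably also be run directly, e.g. python -m migration.html2text page.html, which parses the flags defined in cli.py below and writes the converted Markdown to stdout.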
322
migration/html2text/cli.py
Normal file
@@ -0,0 +1,322 @@
import argparse
import sys

from . import HTML2Text, __version__, config


def main() -> None:
    baseurl = ""

    class bcolors:
        HEADER = "\033[95m"
        OKBLUE = "\033[94m"
        OKGREEN = "\033[92m"
        WARNING = "\033[93m"
        FAIL = "\033[91m"
        ENDC = "\033[0m"
        BOLD = "\033[1m"
        UNDERLINE = "\033[4m"

    p = argparse.ArgumentParser()
    p.add_argument(
        "--default-image-alt",
        dest="default_image_alt",
        default=config.DEFAULT_IMAGE_ALT,
        help="The default alt string for images with missing ones",
    )
    p.add_argument(
        "--pad-tables",
        dest="pad_tables",
        action="store_true",
        default=config.PAD_TABLES,
        help="pad the cells to equal column width in tables",
    )
    p.add_argument(
        "--no-wrap-links",
        dest="wrap_links",
        action="store_false",
        default=config.WRAP_LINKS,
        help="don't wrap links during conversion",
    )
    p.add_argument(
        "--wrap-list-items",
        dest="wrap_list_items",
        action="store_true",
        default=config.WRAP_LIST_ITEMS,
        help="wrap list items during conversion",
    )
    p.add_argument(
        "--wrap-tables",
        dest="wrap_tables",
        action="store_true",
        default=config.WRAP_TABLES,
        help="wrap tables",
    )
    p.add_argument(
        "--ignore-emphasis",
        dest="ignore_emphasis",
        action="store_true",
        default=config.IGNORE_EMPHASIS,
        help="don't include any formatting for emphasis",
    )
    p.add_argument(
        "--reference-links",
        dest="inline_links",
        action="store_false",
        default=config.INLINE_LINKS,
        help="use reference style links instead of inline links",
    )
    p.add_argument(
        "--ignore-links",
        dest="ignore_links",
        action="store_true",
        default=config.IGNORE_ANCHORS,
        help="don't include any formatting for links",
    )
    p.add_argument(
        "--ignore-mailto-links",
        action="store_true",
        dest="ignore_mailto_links",
        default=config.IGNORE_MAILTO_LINKS,
        help="don't include mailto: links",
    )
    p.add_argument(
        "--protect-links",
        dest="protect_links",
        action="store_true",
        default=config.PROTECT_LINKS,
        help="protect links from line breaks surrounding them with angle brackets",
    )
    p.add_argument(
        "--ignore-images",
        dest="ignore_images",
        action="store_true",
        default=config.IGNORE_IMAGES,
        help="don't include any formatting for images",
    )
    p.add_argument(
        "--images-as-html",
        dest="images_as_html",
        action="store_true",
        default=config.IMAGES_AS_HTML,
        help=(
            "Always write image tags as raw html; preserves `height`, `width` and "
            "`alt` if possible."
        ),
    )
    p.add_argument(
        "--images-to-alt",
        dest="images_to_alt",
        action="store_true",
        default=config.IMAGES_TO_ALT,
        help="Discard image data, only keep alt text",
    )
    p.add_argument(
        "--images-with-size",
        dest="images_with_size",
        action="store_true",
        default=config.IMAGES_WITH_SIZE,
        help=(
            "Write image tags with height and width attrs as raw html to retain "
            "dimensions"
        ),
    )
    p.add_argument(
        "-g",
        "--google-doc",
        action="store_true",
        dest="google_doc",
        default=False,
        help="convert an html-exported Google Document",
    )
    p.add_argument(
        "-d",
        "--dash-unordered-list",
        action="store_true",
        dest="ul_style_dash",
        default=False,
        help="use a dash rather than a star for unordered list items",
    )
    p.add_argument(
        "-e",
        "--asterisk-emphasis",
        action="store_true",
        dest="em_style_asterisk",
        default=False,
        help="use an asterisk rather than an underscore for emphasized text",
    )
    p.add_argument(
        "-b",
        "--body-width",
        dest="body_width",
        type=int,
        default=config.BODY_WIDTH,
        help="number of characters per output line, 0 for no wrap",
    )
    p.add_argument(
        "-i",
        "--google-list-indent",
        dest="list_indent",
        type=int,
        default=config.GOOGLE_LIST_INDENT,
        help="number of pixels Google indents nested lists",
    )
    p.add_argument(
        "-s",
        "--hide-strikethrough",
        action="store_true",
        dest="hide_strikethrough",
        default=False,
        help="hide strike-through text. only relevant when -g is specified as well",
    )
    p.add_argument(
        "--escape-all",
        action="store_true",
        dest="escape_snob",
        default=False,
        help=(
            "Escape all special characters.  Output is less readable, but avoids "
            "corner case formatting issues."
        ),
    )
    p.add_argument(
        "--bypass-tables",
        action="store_true",
        dest="bypass_tables",
        default=config.BYPASS_TABLES,
        help="Format tables in HTML rather than Markdown syntax.",
    )
    p.add_argument(
        "--ignore-tables",
        action="store_true",
        dest="ignore_tables",
        default=config.IGNORE_TABLES,
        help="Ignore table-related tags (table, th, td, tr) while keeping rows.",
    )
    p.add_argument(
        "--single-line-break",
        action="store_true",
        dest="single_line_break",
        default=config.SINGLE_LINE_BREAK,
        help=(
            "Use a single line break after a block element rather than two line "
            "breaks. NOTE: Requires --body-width=0"
        ),
    )
    p.add_argument(
        "--unicode-snob",
        action="store_true",
        dest="unicode_snob",
        default=config.UNICODE_SNOB,
        help="Use unicode throughout document",
    )
    p.add_argument(
        "--no-automatic-links",
        action="store_false",
        dest="use_automatic_links",
        default=config.USE_AUTOMATIC_LINKS,
        help="Do not use automatic links wherever applicable",
    )
    p.add_argument(
        "--no-skip-internal-links",
        action="store_false",
        dest="skip_internal_links",
        default=config.SKIP_INTERNAL_LINKS,
        help="Do not skip internal links",
    )
    p.add_argument(
        "--links-after-para",
        action="store_true",
        dest="links_each_paragraph",
        default=config.LINKS_EACH_PARAGRAPH,
        help="Put links after each paragraph instead of document",
    )
    p.add_argument(
        "--mark-code",
        action="store_true",
        dest="mark_code",
        default=config.MARK_CODE,
        help="Mark program code blocks with [code]...[/code]",
    )
    p.add_argument(
        "--decode-errors",
        dest="decode_errors",
        default=config.DECODE_ERRORS,
        help=(
            "What to do in case of decode errors. 'ignore', 'strict' and 'replace' are "
            "acceptable values"
        ),
    )
    p.add_argument(
        "--open-quote",
        dest="open_quote",
        default=config.OPEN_QUOTE,
        help="The character used to open quotes",
    )
    p.add_argument(
        "--close-quote",
        dest="close_quote",
        default=config.CLOSE_QUOTE,
        help="The character used to close quotes",
    )
    p.add_argument(
        "--version", action="version", version=".".join(map(str, __version__))
    )
    p.add_argument("filename", nargs="?")
    p.add_argument("encoding", nargs="?", default="utf-8")
    args = p.parse_args()

    if args.filename and args.filename != "-":
        with open(args.filename, "rb") as fp:
            data = fp.read()
    else:
        data = sys.stdin.buffer.read()

    try:
        html = data.decode(args.encoding, args.decode_errors)
    except UnicodeDecodeError as err:
        warning = bcolors.WARNING + "Warning:" + bcolors.ENDC
        warning += " Use the " + bcolors.OKGREEN
        warning += "--decode-errors=ignore" + bcolors.ENDC + " flag."
        print(warning)
        raise err

    h = HTML2Text(baseurl=baseurl)
    # handle options
    if args.ul_style_dash:
        h.ul_item_mark = "-"
    if args.em_style_asterisk:
        h.emphasis_mark = "*"
        h.strong_mark = "__"

    h.body_width = args.body_width
    h.google_list_indent = args.list_indent
    h.ignore_emphasis = args.ignore_emphasis
    h.ignore_links = args.ignore_links
    h.ignore_mailto_links = args.ignore_mailto_links
    h.protect_links = args.protect_links
    h.ignore_images = args.ignore_images
    h.images_as_html = args.images_as_html
    h.images_to_alt = args.images_to_alt
    h.images_with_size = args.images_with_size
    h.google_doc = args.google_doc
    h.hide_strikethrough = args.hide_strikethrough
    h.escape_snob = args.escape_snob
    h.bypass_tables = args.bypass_tables
    h.ignore_tables = args.ignore_tables
    h.single_line_break = args.single_line_break
    h.inline_links = args.inline_links
    h.unicode_snob = args.unicode_snob
    h.use_automatic_links = args.use_automatic_links
    h.skip_internal_links = args.skip_internal_links
    h.links_each_paragraph = args.links_each_paragraph
    h.mark_code = args.mark_code
    h.wrap_links = args.wrap_links
    h.wrap_list_items = args.wrap_list_items
    h.wrap_tables = args.wrap_tables
    h.pad_tables = args.pad_tables
    h.default_image_alt = args.default_image_alt
    h.open_quote = args.open_quote
    h.close_quote = args.close_quote

    sys.stdout.write(h.handle(html))
165
migration/html2text/config.py
Normal file
@@ -0,0 +1,165 @@
import re

# Use Unicode characters instead of their ascii pseudo-replacements
UNICODE_SNOB = False

# Marker to use for marking tables for padding post processing
TABLE_MARKER_FOR_PAD = "special_marker_for_table_padding"
# Escape all special characters.  Output is less readable, but avoids
# corner case formatting issues.
ESCAPE_SNOB = False

# Put the links after each paragraph instead of at the end.
LINKS_EACH_PARAGRAPH = False

# Wrap long lines at position. 0 for no wrapping.
BODY_WIDTH = 78

# Don't show internal links (href="#local-anchor") -- corresponding link
# targets won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = True

# Use inline, rather than reference, formatting for images and links
INLINE_LINKS = True

# Protect links from line breaks surrounding them with angle brackets (in
# addition to their square brackets)
PROTECT_LINKS = False
# WRAP_LINKS = True
WRAP_LINKS = True

# Wrap list items.
WRAP_LIST_ITEMS = False

# Wrap tables
WRAP_TABLES = False

# Number of pixels Google indents nested lists
GOOGLE_LIST_INDENT = 36

# Values Google and others may use to indicate bold text
BOLD_TEXT_STYLE_VALUES = ("bold", "700", "800", "900")

IGNORE_ANCHORS = False
IGNORE_MAILTO_LINKS = False
IGNORE_IMAGES = False
IMAGES_AS_HTML = False
IMAGES_TO_ALT = False
IMAGES_WITH_SIZE = False
IGNORE_EMPHASIS = False
MARK_CODE = False
DECODE_ERRORS = "strict"
DEFAULT_IMAGE_ALT = ""
PAD_TABLES = False

# Convert links with same href and text to <href> format
# if they are absolute links
USE_AUTOMATIC_LINKS = True

# For checking space-only lines on line 771
RE_SPACE = re.compile(r"\s\+")

RE_ORDERED_LIST_MATCHER = re.compile(r"\d+\.\s")
RE_UNORDERED_LIST_MATCHER = re.compile(r"[-\*\+]\s")
RE_MD_CHARS_MATCHER = re.compile(r"([\\\[\]\(\)])")
RE_MD_CHARS_MATCHER_ALL = re.compile(r"([`\*_{}\[\]\(\)#!])")

# to find links in the text
RE_LINK = re.compile(r"(\[.*?\] ?\(.*?\))|(\[.*?\]:.*?)")

# to find table separators
RE_TABLE = re.compile(r" \| ")

RE_MD_DOT_MATCHER = re.compile(
    r"""
    ^             # start of line
    (\s*\d+)      # optional whitespace and a number
    (\.)          # dot
    (?=\s)        # lookahead assert whitespace
    """,
    re.MULTILINE | re.VERBOSE,
)
RE_MD_PLUS_MATCHER = re.compile(
    r"""
    ^
    (\s*)
    (\+)
    (?=\s)
    """,
    flags=re.MULTILINE | re.VERBOSE,
)
RE_MD_DASH_MATCHER = re.compile(
    r"""
    ^
    (\s*)
    (-)
    (?=\s|\-)     # followed by whitespace (bullet list, or spaced out hr)
                  # or another dash (header or hr)
    """,
    flags=re.MULTILINE | re.VERBOSE,
)
RE_SLASH_CHARS = r"\`*_{}[]()#+-.!"
RE_MD_BACKSLASH_MATCHER = re.compile(
    r"""
    (\\)          # match one slash
    (?=[%s])      # followed by a char that requires escaping
    """
    % re.escape(RE_SLASH_CHARS),
    flags=re.VERBOSE,
)

UNIFIABLE = {
    "rsquo": "'",
    "lsquo": "'",
    "rdquo": '"',
    "ldquo": '"',
    "copy": "(C)",
    "mdash": "--",
    "nbsp": " ",
    "rarr": "->",
    "larr": "<-",
    "middot": "*",
    "ndash": "-",
    "oelig": "oe",
    "aelig": "ae",
    "agrave": "a",
    "aacute": "a",
    "acirc": "a",
    "atilde": "a",
    "auml": "a",
    "aring": "a",
    "egrave": "e",
    "eacute": "e",
    "ecirc": "e",
    "euml": "e",
    "igrave": "i",
    "iacute": "i",
    "icirc": "i",
    "iuml": "i",
    "ograve": "o",
    "oacute": "o",
    "ocirc": "o",
    "otilde": "o",
    "ouml": "o",
    "ugrave": "u",
    "uacute": "u",
    "ucirc": "u",
    "uuml": "u",
    "lrm": "",
    "rlm": "",
}

# Format tables in HTML rather than Markdown syntax
BYPASS_TABLES = False
# Ignore table-related tags (table, th, td, tr) while keeping rows
IGNORE_TABLES = False


# Use a single line break after a block element rather than two line breaks.
# NOTE: Requires body width setting to be 0.
SINGLE_LINE_BREAK = False


# Use double quotation marks when converting the <q> tag.
OPEN_QUOTE = '"'
CLOSE_QUOTE = '"'
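
These module-level defaults seed every HTML2Text instance and can be overridden per instance. A small sketch (attribute names mirror the constants above; the sample HTML is invented):

    from migration.html2text import HTML2Text

    h = HTML2Text()
    h.unicode_snob = True  # keep real unicode instead of the UNIFIABLE ascii stand-ins
    h.body_width = 0       # same effect as BODY_WIDTH = 0: no line wrapping
    print(h.handle('<p>&ldquo;quoted&rdquo;&nbsp;text</p>'))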
18
migration/html2text/elements.py
Normal file
@@ -0,0 +1,18 @@
from typing import Dict, Optional


class AnchorElement:
    __slots__ = ["attrs", "count", "outcount"]

    def __init__(self, attrs: Dict[str, Optional[str]], count: int, outcount: int):
        self.attrs = attrs
        self.count = count
        self.outcount = outcount


class ListElement:
    __slots__ = ["name", "num"]

    def __init__(self, name: str, num: int):
        self.name = name
        self.num = num
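
Both classes are plain records used by the converter above: AnchorElement tracks reference-style links until their [n]: url footnotes are flushed, and ListElement tracks ol/ul nesting. A tiny illustration (values invented):

    from migration.html2text.elements import AnchorElement, ListElement

    a = AnchorElement({"href": "https://discours.io"}, count=1, outcount=0)
    li = ListElement("ol", 0)  # num is bumped once per rendered <li>
    print(a.attrs["href"], li.name)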
0
migration/html2text/py.typed
Normal file
3
migration/html2text/typing.py
Normal file
@@ -0,0 +1,3 @@
class OutCallback:
    def __call__(self, s: str) -> None:
        ...
290
migration/html2text/utils.py
Normal file
@@ -0,0 +1,290 @@
import html.entities
from typing import Dict, List, Optional

from . import config

unifiable_n = {
    html.entities.name2codepoint[k]: v
    for k, v in config.UNIFIABLE.items()
    if k != "nbsp"
}


def hn(tag: str) -> int:
    if tag[0] == "h" and len(tag) == 2:
        n = tag[1]
        if "0" < n <= "9":
            return int(n)
    return 0


def dumb_property_dict(style: str) -> Dict[str, str]:
    """
    :returns: A hash of css attributes
    """
    return {
        x.strip().lower(): y.strip().lower()
        for x, y in [z.split(":", 1) for z in style.split(";") if ":" in z]
    }


def dumb_css_parser(data: str) -> Dict[str, Dict[str, str]]:
    """
    :type data: str

    :returns: A hash of css selectors, each of which contains a hash of
    css attributes.
    :rtype: dict
    """
    # remove @import sentences
    data += ";"
    importIndex = data.find("@import")
    while importIndex != -1:
        data = data[0:importIndex] + data[data.find(";", importIndex) + 1 :]
        importIndex = data.find("@import")

    # parse the css. reverted from dictionary comprehension in order to
    # support older pythons
    pairs = [x.split("{") for x in data.split("}") if "{" in x.strip()]
    try:
        elements = {a.strip(): dumb_property_dict(b) for a, b in pairs}
    except ValueError:
        elements = {}  # not that important

    return elements


def element_style(
    attrs: Dict[str, Optional[str]],
    style_def: Dict[str, Dict[str, str]],
    parent_style: Dict[str, str],
) -> Dict[str, str]:
    """
    :type attrs: dict
    :type style_def: dict
    :type parent_style: dict

    :returns: A hash of the 'final' style attributes of the element
    :rtype: dict
    """
    style = parent_style.copy()
    if "class" in attrs:
        assert attrs["class"] is not None
        for css_class in attrs["class"].split():
            css_style = style_def.get("." + css_class, {})
            style.update(css_style)
    if "style" in attrs:
        assert attrs["style"] is not None
        immediate_style = dumb_property_dict(attrs["style"])
        style.update(immediate_style)

    return style


def google_list_style(style: Dict[str, str]) -> str:
    """
    Finds out whether this is an ordered or unordered list

    :type style: dict

    :rtype: str
    """
    if "list-style-type" in style:
        list_style = style["list-style-type"]
        if list_style in ["disc", "circle", "square", "none"]:
            return "ul"

    return "ol"


def google_has_height(style: Dict[str, str]) -> bool:
    """
    Check if the style of the element has the 'height' attribute
    explicitly defined

    :type style: dict

    :rtype: bool
    """
    return "height" in style


def google_text_emphasis(style: Dict[str, str]) -> List[str]:
    """
    :type style: dict

    :returns: A list of all emphasis modifiers of the element
    :rtype: list
    """
    emphasis = []
    if "text-decoration" in style:
        emphasis.append(style["text-decoration"])
    if "font-style" in style:
        emphasis.append(style["font-style"])
    if "font-weight" in style:
        emphasis.append(style["font-weight"])

    return emphasis


def google_fixed_width_font(style: Dict[str, str]) -> bool:
    """
    Check if the css of the current element defines a fixed width font

    :type style: dict

    :rtype: bool
    """
    font_family = ""
    if "font-family" in style:
        font_family = style["font-family"]
    return "courier new" == font_family or "consolas" == font_family


def list_numbering_start(attrs: Dict[str, Optional[str]]) -> int:
    """
    Extract numbering from list element attributes

    :type attrs: dict

    :rtype: int or None
    """
    if "start" in attrs:
        assert attrs["start"] is not None
        try:
            return int(attrs["start"]) - 1
        except ValueError:
            pass

    return 0


def skipwrap(
    para: str, wrap_links: bool, wrap_list_items: bool, wrap_tables: bool
) -> bool:
    # If it appears to contain a link
    # don't wrap
    if not wrap_links and config.RE_LINK.search(para):
        return True
    # If the text begins with four spaces or one tab, it's a code block;
    # don't wrap
    if para[0:4] == "    " or para[0] == "\t":
        return True

    # If the text begins with only two "--", possibly preceded by
    # whitespace, that's an emdash; so wrap.
    stripped = para.lstrip()
    if stripped[0:2] == "--" and len(stripped) > 2 and stripped[2] != "-":
        return False

    # I'm not sure what this is for; I thought it was to detect lists,
    # but there's a <br>-inside-<span> case in one of the tests that
    # also depends upon it.
    if stripped[0:1] in ("-", "*") and not stripped[0:2] == "**":
        return not wrap_list_items

    # If text contains a pipe character it is likely a table
    if not wrap_tables and config.RE_TABLE.search(para):
        return True

    # If the text begins with a single -, *, or +, followed by a space,
    # or an integer, followed by a ., followed by a space (in either
    # case optionally proceeded by whitespace), it's a list; don't wrap.
    return bool(
        config.RE_ORDERED_LIST_MATCHER.match(stripped)
        or config.RE_UNORDERED_LIST_MATCHER.match(stripped)
    )


def escape_md(text: str) -> str:
    """
    Escapes markdown-sensitive characters within other markdown
    constructs.
    """
    return config.RE_MD_CHARS_MATCHER.sub(r"\\\1", text)


def escape_md_section(text: str, snob: bool = False) -> str:
    """
    Escapes markdown-sensitive characters across whole document sections.
    """
    text = config.RE_MD_BACKSLASH_MATCHER.sub(r"\\\1", text)

    if snob:
        text = config.RE_MD_CHARS_MATCHER_ALL.sub(r"\\\1", text)

    text = config.RE_MD_DOT_MATCHER.sub(r"\1\\\2", text)
    text = config.RE_MD_PLUS_MATCHER.sub(r"\1\\\2", text)
    text = config.RE_MD_DASH_MATCHER.sub(r"\1\\\2", text)

    return text


def reformat_table(lines: List[str], right_margin: int) -> List[str]:
    """
    Given the lines of a table
    padds the cells and returns the new lines
    """
    # find the maximum width of the columns
    max_width = [len(x.rstrip()) + right_margin for x in lines[0].split("|")]
    max_cols = len(max_width)
    for line in lines:
        cols = [x.rstrip() for x in line.split("|")]
        num_cols = len(cols)

        # don't drop any data if colspan attributes result in unequal lengths
        if num_cols < max_cols:
            cols += [""] * (max_cols - num_cols)
        elif max_cols < num_cols:
            max_width += [len(x) + right_margin for x in cols[-(num_cols - max_cols) :]]
            max_cols = num_cols

        max_width = [
            max(len(x) + right_margin, old_len) for x, old_len in zip(cols, max_width)
        ]

    # reformat
    new_lines = []
    for line in lines:
        cols = [x.rstrip() for x in line.split("|")]
        if set(line.strip()) == set("-|"):
            filler = "-"
            new_cols = [
                x.rstrip() + (filler * (M - len(x.rstrip())))
                for x, M in zip(cols, max_width)
            ]
            new_lines.append("|-" + "|".join(new_cols) + "|")
        else:
            filler = " "
            new_cols = [
                x.rstrip() + (filler * (M - len(x.rstrip())))
                for x, M in zip(cols, max_width)
            ]
            new_lines.append("| " + "|".join(new_cols) + "|")
    return new_lines


def pad_tables_in_text(text: str, right_margin: int = 1) -> str:
    """
    Provide padding for tables in the text
    """
    lines = text.split("\n")
    table_buffer = []  # type: List[str]
    table_started = False
    new_lines = []
    for line in lines:
        # Toggle table started
        if config.TABLE_MARKER_FOR_PAD in line:
            table_started = not table_started
            if not table_started:
                table = reformat_table(table_buffer, right_margin)
                new_lines.extend(table)
                table_buffer = []
                new_lines.append("")
            continue
        # Process lines
        if table_started:
            table_buffer.append(line)
        else:
            new_lines.append(line)
    return "\n".join(new_lines)
@@ -6,9 +6,6 @@ from orm import Shout, Comment, CommentRating, User
 from orm.base import local_session
 from migration.html2text import html2text
 
-# users_dict = json.loads(open(abspath('migration/data/users.dict.json')).read())
-# topics_dict = json.loads(open(abspath('migration/data/topics.dict.json')).read()) # old_id keyed
-
 def migrate(entry):
     '''
     {
@@ -55,33 +52,38 @@ def migrate(entry):
         'author': author.id if author else 0,
         'createdAt': date_parse(entry['createdAt']),
         'body': html2text(entry['body']),
-        'shout': shout
+        'shout': shout.id
     }
     if 'rating' in entry:
         comment_dict['rating'] = entry['rating']
     if entry.get('deleted'):
-        comment_dict['deletedAt'] = entry['updatedAt']
-        comment_dict['deletedBy'] = entry['updatedBy']
+        comment_dict['deletedAt'] = date_parse(entry['updatedAt'])
+        comment_dict['deletedBy'] = str(entry['updatedBy'])
+    if entry.get('updatedAt'):
+        comment_dict['updatedAt'] = date_parse(entry['updatedAt'])
+        # comment_dict['updatedBy'] = str(entry.get('updatedBy', 0)) invalid keyword for Comment
     if 'thread' in entry:
         comment_dict['old_thread'] = entry['thread']
-    print(comment_dict)
+    # print(comment_dict)
     comment = Comment.create(**comment_dict)
-    print(comment)
+    comment_dict['id'] = comment.id
+    comment_dict['ratings'] = []
+    # print(comment)
     for comment_rating_old in entry.get('ratings',[]):
-        rater_id = session.query(User).filter(User.old_id == comment_rating_old['createdBy']).first()
-        createdTs = comment_rating_old.get('createdAt', datetime.datetime.now())
-        u = entry.get('updatedAt', False)
-        comment_rating_dict = {
-            'value': comment_rating_old['value'],
-            'createdBy': rater_id or 0,
-            'createdAt': createdTs,
-            'comment_id': comment.id
-        }
-        try:
-            comment_rating = CommentRating.create(**comment_rating_dict)
-            # TODO: comment rating append resolver
-            # comment['ratings'].append(comment_rating)
-        except Exception as e:
-            print(comment_rating)
-            pass # raise e
-    return comment
+        rater = session.query(User).filter(User.old_id == comment_rating_old['createdBy']).first()
+        if rater and comment:
+            comment_rating_dict = {
+                'value': comment_rating_old['value'],
+                'createdBy': rater.id,
+                'comment_id': comment.id
+            }
+            cts = comment_rating_old.get('createdAt')
+            if cts: comment_rating_dict['createdAt'] = date_parse(cts)
+            try:
+                comment_rating = CommentRating.create(**comment_rating_dict)
+                # comment_rating_dict['id'] = comment_rating.id
+                comment_dict['ratings'].append(comment_rating_dict)
+            except Exception as e:
+                print(comment_rating_dict)
+                raise e
+    return comment_dict
@@ -1,6 +1,6 @@
 from orm.base import local_session
 from orm import Topic
-# from dateutil.parser import parse as date_parse
+from dateutil.parser import parse as date_parse
 
 def migrate(entry):
     '''
@@ -16,7 +16,7 @@ def migrate(entry):
     topic_dict = {
         'slug': entry['slug'],
         'createdBy': entry['createdBy'], # NOTE: uses an old user id
-        'createdAt': entry['createdAt'],
+        'createdAt': date_parse(entry['createdAt']),
         'title': entry['title'].lower(),
         'parents': [],
         'children': [],
@@ -31,4 +31,4 @@ def migrate(entry):
         return topic_dict
     except Exception as e:
         print(e)
-        return {}
+        raise e
@@ -1,7 +1,8 @@
 from dateutil.parser import parse as date_parse
-from os.path import abspath
 import frontmatter
 import json
+import sqlite3
+import sqlalchemy
 from orm import Shout, Comment, Topic, ShoutRating, User #, TODO: CommentRating
 from bs4 import BeautifulSoup
 from migration.html2text import html2text
@@ -11,23 +12,7 @@ from datetime import datetime
 from sqlalchemy.exc import IntegrityError
 from orm.base import local_session
 
-users_dict = json.loads(open(abspath('migration/data/users.dict.json')).read())
-print(str(len(users_dict.items())) + ' users loaded')
-
-cats_data = json.loads(open(abspath('migration/data/content_item_categories.json')).read()) # old_id keyed
-cats_dict = { x['_id']: x for x in cats_data }
-print(str(len(cats_data)) + ' categories loaded')
-
-comments_data = json.loads(open(abspath('migration/data/comments.json')).read())
-print(str(len(comments_data)) + ' comments loaded')
-
-comments_by_post = {}
-for comment in comments_data:
-    p = comment['contentItem']
-    comments_by_post[p] = comments_by_post.get(p, [])
-    comments_by_post[p].append(comment)
-
-users_dict['0'] = {
+DISCOURS_USER = {
     'id': 9999999,
     'slug': 'discours',
     'name': 'Дискурс',
@@ -57,7 +42,7 @@ def get_metadata(r):
     metadata['cover'] = r.get('cover')
     return metadata
 
-def migrate(entry):
+def migrate(entry, users_by_oid, topics_by_oid):
     '''
     type Shout {
         slug: String!
@@ -92,7 +77,6 @@ def migrate(entry):
         'views': entry.get('views', 0),
         'rating': entry.get('rating', 0),
         'ratings': [],
-        'comments': [],
         'createdAt': entry.get('createdAt', '2016-03-05 22:22:00.350000')
     }
     r['slug'] = entry.get('slug', '')
@@ -106,7 +90,7 @@ def migrate(entry):
         # print(entry)
         raise Exception
     try:
-        r['topics'].append(cats_dict[entry['category']]['slug'])
+        r['topics'].append(topics_by_oid[entry['category']]['slug'])
     except Exception:
         print(entry['category'])
     if entry.get('image') is not None:
@@ -149,110 +133,102 @@ def migrate(entry):
     r['body'] = html2text(body_html)
     body = r.get('body', '')
     r['old_id'] = entry.get('_id')
-    user = None
-    try:
-        userdata = users_dict.get(entry['createdBy'], users_dict['0'])
-        slug = userdata['slug']
-        name = userdata['name']
-        userpic = userdata['userpic']
-    except KeyError:
-        app = entry.get('application')
-        if app is not None:
-            authordata = {
-                'username': app['email'],
-                'email': app['email'],
-                'name': app['name'],
-                'bio': app.get('bio', ''),
-                'emailConfirmed': False,
-                'slug': translit(app['name'], 'ru', reversed=True).replace(' ', '-').lower(),
-                'createdAt': ts,
-                'wasOnlineAt': ts
-            }
-            try:
-                user = User.create(**authordata)
-            except IntegrityError:
-                with local_session() as session:
-                    user = session.query(User).filter(
-                        User.email == authordata['email']).first()
-                    if user is None:
-                        user = session.query(User).filter(
-                            User.slug == authordata['slug']).first()
-            slug = user['slug']
-            name = user['name']
-            userpic = user['userpic']
-        else:
-            # no application, no author!
-            slug = 'discours'
-            name = 'Дискурс'
-            userpic = 'https://discours.io/images/logo-mini.svg'
-            with local_session() as session:
-                user = session.query(User).filter(User.slug == slug).first()
-    r['authors'].append({
-        'id': user.id,
-        'slug': slug,
-        'name': name,
-        'userpic': userpic
-    })
-
-    r['layout'] = type2layout[entry['type']]
-    metadata = get_metadata(r)
-    content = frontmatter.dumps(frontmatter.Post(body, **metadata))
-    if entry['published']:
-        ext = 'md'
-        open('migration/content/' +
-             r['layout'] + '/' + r['slug'] + '.' + ext, 'w').write(content)
-    try:
-        shout_dict = r.copy()
-        shout_dict['authors'] = [user, ]
-        if entry.get('createdAt') is not None:
-            shout_dict['createdAt'] = parse(r.get('createdAt'))
-        else:
-            shout_dict['createdAt'] = ts
-        if entry.get('published'):
-            if entry.get('publishedAt') is not None:
-                shout_dict['publishedAt'] = parse(entry.get('publishedAt'))
-            else:
-                shout_dict['publishedAt'] = ts
-        del shout_dict['published']
-        try:
-            del shout_dict['topics'] # FIXME: AttributeError: 'str' object has no attribute '_sa_instance_state'
-            del shout_dict['views'] # FIXME: TypeError: 'views' is an invalid keyword argument for Shout
-            del shout_dict['rating'] # FIXME: TypeError: 'rating' is an invalid keyword argument for Shout
-            del shout_dict['ratings']
-            s = Shout.create(**shout_dict)
-            r['id'] = s.id
-            if len(entry.get('ratings', [])) > 0:
-                # TODO: migrate shout ratings
-                shout_dict['ratings'] = []
-                for shout_rating_old in entry['ratings']:
-                    shout_rating = ShoutRating.create(
-                        rater_id = users_dict[shout_rating_old['createdBy']]['id'],
-                        shout_id = s.id,
-                        value = shout_rating_old['value']
-                    )
-                    s.ratings.append(shout_rating.id)
-                s.save()
-            # TODO: migrate topics
-            '''
-            with local_session() as session:
-                for topic_slug in topic_slugs:
-                    topic = session.query(Topic).filter(Topic.slug == topic_slug).first()
-                    if not topic:
-                        topic_dict = migrateCategory()
-                        if topic_dict:
-                            topic = Topic.create(**topic_dict)
-                    s.topics = [ topic, ]
-                    s.save()
-            '''
-        except Exception as e:
-            r['error'] = 'db error'
-            # pass
-            raise e
-    except Exception as e:
-        if not r['body']: r['body'] = 'body moved'
-        raise e
-    return r
+    # get author data
+    userdata = {}
+    try: userdata = users_by_oid[entry['createdBy']]
+    except KeyError:
+        app = entry.get('application')
+        if app:
+            userdata = {
+                'username': app['email'],
+                'email': app['email'],
+                'name': app['name'],
+                'bio': app.get('bio', ''),
+                'emailConfirmed': False,
+                'slug': translit(app['name'], 'ru', reversed=True).replace(' ', '-').replace('\'', '').lower(),
+                'createdAt': ts,
+                'wasOnlineAt': ts
+            }
+    if userdata == {}:
+        userdata = {
+            'name': 'Дискурс',
+            'slug': 'discours',
+            'userpic': 'https://discours.io/image/logo-mini.svg'
+        }
+
+    # set author data
+    shout_dict = r.copy()
+    author = { # a short version for public listings
+        'slug': userdata.get('slug', 'discours'),
+        'name': userdata.get('name', 'Дискурс'),
+        'userpic': userdata.get('userpic', '')
+    }
+    shout_dict['authors'] = [ author, ]
+
+    if entry['published']:
+        metadata = get_metadata(r)
+        content = frontmatter.dumps(frontmatter.Post(body, **metadata))
+        ext = 'md'
+        open('migration/content/' + r['layout'] + '/' + r['slug'] + '.' + ext, 'w').write(content)
+    try:
+        shout_dict['createdAt'] = date_parse(r.get('createdAt')) if entry.get('createdAt') else ts
+        shout_dict['publishedAt'] = date_parse(entry.get('publishedAt')) if entry.get('published') else ts
+
+        if entry.get('deletedAt') is not None:
+            shout_dict['deletedAt'] = date_parse(entry.get('deletedAt'))
+            shout_dict['deletedBy'] = entry.get('deletedBy', '0')
+
+        del shout_dict['published'] # invalid keyword argument for Shout
+        del shout_dict['topics'] # FIXME: AttributeError: 'str' object has no attribute '_sa_instance_state'
+        del shout_dict['views'] # FIXME: TypeError: 'views' is an invalid keyword argument for Shout
+        del shout_dict['rating'] # FIXME: TypeError: 'rating' is an invalid keyword argument for Shout
+        del shout_dict['ratings']
+
+        # get user
+
+        user = None
+        email = userdata.get('email')
+        slug = userdata.get('slug')
+        with local_session() as session:
+            try:
+                if email: user = session.query(User).filter(User.email == email).first()
+                if not user and slug: user = session.query(User).filter(User.slug == slug).first()
+                if not user and userdata: user = User.create(**userdata)
+            except:
+                print(userdata)
+            assert user, 'could not get a user'
+
+            shout_dict['authors'] = [ user, ]
+            try: s = Shout.create(**shout_dict)
+            except Exception as e: raise e
+
+        with local_session() as session:
+            shout_dict['id'] = s.id
+            # shout ratings
+            shout_dict['ratings'] = []
+            for shout_rating_old in entry.get('ratings',[]):
+                rater = session.query(User).filter(User.old_id == shout_rating_old['createdBy']).first()
+                if rater:
+                    shout_rating_dict = {
+                        'value': shout_rating_old['value'],
+                        'rater_id': rater.id,
+                        'shout_id': s.id
+                    }
+                    cts = shout_rating_old.get('createdAt')
+                    if cts: shout_rating_dict['rater_id'] = date_parse(cts)
+                    try: shout_rating = ShoutRating.create(**shout_rating_dict)
+                    except sqlalchemy.exc.IntegrityError: pass
+                    shout_dict['ratings'].append(shout_rating_dict)
+            # shout topics
+            shout_dict['topics'] = []
+            for topic_slug in r['topics']:
+                topic = session.query(Topic).filter(Topic.slug == topic_slug).first()
+                if not topic:
+                    try: topic = Topic.create(**{ 'slug': topic_slug, 'title': topic_slug })
+                    except Exception as e: raise e
+                shout_dict['topics'].append(topic.slug)
+    except Exception as e:
+        if not shout_dict['body']: r['body'] = 'body moved'
+        raise e
+    return shout_dict # for json
@@ -1,18 +1,8 @@
 import json
-from os.path import abspath
 from datetime import datetime
+from orm.base import local_session
+from orm import Topic
+from dateutil.parser import parse as date_parse

-users_dict = json.loads(open(abspath('migration/data/users.dict.json')).read())
-users_dict['0'] = {
-    'id': 9999999,
-    'slug': 'discours.io',
-    'name': 'Дискурс',
-    'userpic': 'https://discours.io/images/logo-mini.svg',
-    'createdAt': '2016-03-05 22:22:00.350000'
-}
-
-ts = datetime.now()

 def migrate(entry):
     '''
@@ -25,12 +15,26 @@ def migrate(entry):
     children: [String] # and children
     }
     '''
-    creator = users_dict.get(entry['createdBy'], users_dict['0'])
-    return {
+    if type(entry['createdAt']) == type(''):
+        ts = date_parse(entry['createdAt'])
+    else:
+        ts = datetime.fromtimestamp(entry['createdAt']/1000)
+    topic_dict = {
         'slug': entry['slug'],
-        'createdBy': creator['id'], # NOTE: uses an old user id
-        'createdAt': entry['createdAt'],
+        'createdBy': 0,
+        'createdAt': ts,
         'title': entry['title'].lower(),
         'parents': [],
         'children': []
     }
+    try:
+        with local_session() as session:
+            topic = session.query(Topic).filter(Topic.slug == entry['slug']).first()
+            if not topic: topic = Topic.create(**topic_dict)
+            topic_dict['id'] = topic.id
+    except Exception as e:
+        print(e)
+        raise e
+    topic_dict['tag_id'] = entry['_id']
+    return topic_dict
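Note: this migrate() has to cope with createdAt arriving either as an ISO string or as a JavaScript-style epoch-milliseconds number. The same normalization, restated as a standalone, runnable helper (normalize_created_at is a hypothetical name, not part of the commit):

    from datetime import datetime
    from dateutil.parser import parse as date_parse

    def normalize_created_at(value):
        # old Discours exports mix ISO strings and JS epoch milliseconds
        if isinstance(value, str):
            return date_parse(value)
        return datetime.fromtimestamp(value / 1000)

    print(normalize_created_at('2016-03-05 22:22:00.350000'))
    print(normalize_created_at(1457216520350))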
@@ -1,86 +1,110 @@
-from orm import User, Role
+from orm import User, Role, UserRating
 import frontmatter
 from dateutil.parser import parse
 from migration.html2text import html2text
-# from migration.html2md import Converter
-# markdown = Converter()
+from orm.base import local_session

 counter = 0

 def migrate(entry, limit=668):
     '''
     type User {
         username: String! # email
         createdAt: DateTime!
         email: String
         password: String
         oauth: String # provider:token
         name: String # to display
         userpic: String
         links: [String]
         emailConfirmed: Boolean # should contain all emails too
         id: Int!
         muted: Boolean
         rating: Int
         roles: [Role]
         updatedAt: DateTime
         wasOnlineAt: DateTime
         ratings: [Rating]
         slug: String
         bio: String
         notifications: [Int]
     }
     '''
     res = {}
     res['old_id'] = entry['_id']
     res['password'] = entry['services']['password'].get('bcrypt', '')
     res['username'] = entry['emails'][0]['address']
     res['email'] = res['username']
     res['wasOnlineAt'] = parse(entry.get('loggedInAt', entry['createdAt']))
     res['emailConfirmed'] = entry['emails'][0]['verified']
     res['createdAt'] = parse(entry['createdAt'])
     res['rating'] = entry['rating'] # number
     res['roles'] = [] # entry['roles'] # roles by community
     res['ratings'] = [] # entry['ratings']
     res['notifications'] = []
     res['links'] = []
     res['muted'] = False
     res['bio'] = html2text(entry.get('bio', ''))
-    if entry['profile']:
+    res['name'] = 'anonymous'
+    if not res['bio'].strip() or res['bio'] == '\n': del res['bio']
+    if entry.get('profile'):
+        # slug
         res['slug'] = entry['profile'].get('path')
-        try:
-            res['userpic'] = 'https://assets.discours.io/unsafe/100x/' + entry['profile']['thumborId']
-        except KeyError:
-            try:
-                res['userpic'] = entry['profile']['image']['url']
-            except KeyError:
-                res['userpic'] = ''
+
+        # userpic
+        try: res['userpic'] = 'https://assets.discours.io/unsafe/100x/' + entry['profile']['thumborId']
+        except KeyError:
+            try: res['userpic'] = entry['profile']['image']['url']
+            except KeyError: res['userpic'] = ''
+
+        # name
         fn = entry['profile'].get('firstName', '')
         ln = entry['profile'].get('lastName', '')
         name = res['slug'] if res['slug'] else 'anonymous'
         name = fn if fn else name
         name = (name + ' ' + ln) if ln else name
-        name = entry['profile']['path'] if len(name) < 2 else name
+        name = entry['profile']['path'].lower().replace(' ', '-') if len(name) < 2 else name
         res['name'] = name
+
+        # links
         fb = entry['profile'].get('facebook', False)
         if fb:
             res['links'].append(fb)
         vk = entry['profile'].get('vkontakte', False)
         if vk:
             res['links'].append(vk)
         tr = entry['profile'].get('twitter', False)
         if tr:
             res['links'].append(tr)
         ws = entry['profile'].get('website', False)
         if ws:
             res['links'].append(ws)
-    if not res['slug']:
-        res['slug'] = res['links'][0].split('/')[-1]
-    if not res['slug']:
-        res['slug'] = res['email'].split('@')[0]
-    else:
-        old = res['old_id']
+
+    # some checks
+    if not res['slug'] and len(res['links']) > 0: res['slug'] = res['links'][0].split('/')[-1]
+    res['slug'] = res.get('slug', res['email'].split('@')[0])
+    old = res['old_id']
     user = User.create(**res.copy())
     res['id'] = user.id
+    res['ratings'] = []
+    for user_rating_old in entry.get('ratings',[]):
+        with local_session() as session:
+            rater = session.query(User).filter(User.old_id == user_rating_old['createdBy']).first()
+            if rater:
+                user_rating_dict = {
+                    'value': user_rating_old['value'],
+                    'rater_id': rater.id,
+                    'user_id': user.id
+                }
+                cts = user_rating_old.get('createdAt')
+                if cts: user_rating_dict['createdAt'] = parse(cts)
+                try:
+                    user_rating = UserRating.create(**user_rating_dict)
+                    res['ratings'].append(user_rating_dict)
+                except Exception as e:
+                    print(user_rating_dict)
+                    raise e
     return res
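One detail worth flagging in the new slug handling: res.get('slug', default) only falls back to the email when the 'slug' key is absent, not when profile.path was present but empty, so the email fallback fires less often than it may look. A compact sketch of the intended fallback chain, profile path -> last segment of the first social link -> email local part (pick_slug is a hypothetical helper, not in the commit):

    def pick_slug(path, links, email):
        # intended precedence: explicit path, then first link, then email
        if path:
            return path
        if links:
            return links[0].split('/')[-1]
        return email.split('@')[0]

    assert pick_slug(None, ['https://twitter.com/someone'], 'a@b.io') == 'someone'
    assert pick_slug(None, [], 'reader@discours.io') == 'reader'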
@@ -1,6 +1,6 @@
 from orm.rbac import Operation, Resource, Permission, Role
 from orm.community import Community
-from orm.user import User
+from orm.user import User, UserRating
 from orm.message import Message
 from orm.topic import Topic
 from orm.notification import Notification
@@ -9,7 +9,7 @@ from orm.shout import Shout, ShoutAuthor, ShoutTopic, ShoutRating, ShoutViewByDa
 from orm.base import Base, engine, local_session
 from orm.comment import Comment, CommentRating

-__all__ = ["User", "Role", "Operation", "Permission", "Message", "Shout", "Topic", "Notification", "ShoutRating", "Comment", "CommentRating"]
+__all__ = ["User", "Role", "Operation", "Permission", "Message", "Shout", "Topic", "Notification", "ShoutRating", "Comment", "CommentRating", "UserRating"]

 Base.metadata.create_all(engine)
 Operation.init_table()
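The create_all() call at the bottom of orm/__init__ only emits tables for models that have been imported onto Base.metadata by that point, which is why UserRating has to be pulled in alongside User. A throwaway illustration with its own Base and an in-memory SQLite engine (assumption: orm.base follows the usual declarative pattern):

    from sqlalchemy import Column, Integer, create_engine, inspect
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)  # no models defined yet -> no tables

    class UserRating(Base):  # defining the model registers its table
        __tablename__ = 'user_rating'
        rater_id = Column(Integer, primary_key=True)

    Base.metadata.create_all(engine)  # now 'user_rating' gets created
    print(inspect(engine).has_table('user_rating'))  # True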
@@ -22,6 +22,7 @@ class Comment(Base):
     body: str = Column(String, nullable=False, comment="Comment Body")
     createdAt = Column(DateTime, nullable=False, default = datetime.now, comment="Created at")
     updatedAt = Column(DateTime, nullable=True, comment="Updated at")
+    updatedBy = Column(ForeignKey("user.id"), nullable=True, comment="Last Editor")
     deletedAt = Column(DateTime, nullable=True, comment="Deleted at")
     deletedBy = Column(ForeignKey("user.id"), nullable=True, comment="Deleted by")
     shout: int = Column(ForeignKey("shout.id"), nullable=True, comment="Shout ID")
@@ -17,8 +17,8 @@ class UserNotifications(Base):
     kind: str = Column(String, ForeignKey("notification.kind"))
     values: JSONType = Column(JSONType, nullable = True) # [ <var1>, .. ]

-class UserRatings(Base):
-    __tablename__ = "user_ratings"
+class UserRating(Base):
+    __tablename__ = "user_rating"

     id = None
     rater_id = Column(ForeignKey('user.id'), primary_key = True)
@@ -55,7 +55,7 @@ class User(Base):
     links: JSONType = Column(JSONType, nullable=True, comment="Links")
     oauth: str = Column(String, nullable=True)
     notifications = relationship(lambda: UserNotifications)
-    ratings = relationship(UserRatings, foreign_keys=UserRatings.user_id)
+    ratings = relationship(UserRating, foreign_keys=UserRating.user_id)
     roles = relationship(lambda: Role, secondary=UserRoles)
     topics = relationship(lambda: Topic, secondary=UserTopics)
     old_id: str = Column(String, nullable = True)
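After the rename, user_rating rows carry two foreign keys into user (rater_id and user_id), so the User.ratings relationship has to name which column it joins on, which is what the foreign_keys argument does. A cut-down, runnable sketch of that shape, with simplified columns and an in-memory SQLite engine rather than the real orm.user:

    from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
    from sqlalchemy.orm import Session, declarative_base, relationship

    Base = declarative_base()

    class UserRating(Base):
        __tablename__ = 'user_rating'
        rater_id = Column(ForeignKey('user.id'), primary_key=True)  # who rated
        user_id = Column(ForeignKey('user.id'), primary_key=True)   # who was rated
        value = Column(Integer)

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        slug = Column(String)
        # two FKs point at user.id, so disambiguate the join column
        ratings = relationship(UserRating, foreign_keys=UserRating.user_id)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        rater, rated = User(id=1, slug='rater'), User(id=2, slug='rated')
        session.add_all([rater, rated, UserRating(rater_id=1, user_id=2, value=1)])
        session.commit()
        print([r.value for r in rated.ratings])  # [1]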
931  poetry.lock  generated  Normal file
@ -0,0 +1,931 @@
|
||||||
|
[[package]]
|
||||||
|
name = "aioredis"
|
||||||
|
version = "2.0.0"
|
||||||
|
description = "asyncio (PEP 3156) Redis support"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.6"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
async-timeout = "*"
|
||||||
|
typing-extensions = "*"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
hiredis = ["hiredis (>=1.0)"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "anyio"
|
||||||
|
version = "3.3.3"
|
||||||
|
description = "High level compatibility layer for multiple asynchronous event loop implementations"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.6.2"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
idna = ">=2.8"
|
||||||
|
sniffio = ">=1.1"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
doc = ["sphinx-rtd-theme", "sphinx-autodoc-typehints (>=1.2.0)"]
|
||||||
|
test = ["coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "pytest (>=6.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (<0.15)", "mock (>=4)", "uvloop (>=0.15)"]
|
||||||
|
trio = ["trio (>=0.16)"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "ariadne"
|
||||||
|
version = "0.13.0"
|
||||||
|
description = "Ariadne is a Python library for implementing GraphQL servers."
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = "*"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
graphql-core = ">=3.1.0"
|
||||||
|
starlette = "<0.15"
|
||||||
|
typing-extensions = ">=3.6.0"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
asgi-file-uploads = ["python-multipart (>=0.0.5)"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "asgiref"
|
||||||
|
version = "3.4.1"
|
||||||
|
description = "ASGI specs, helper code, and adapters"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.6"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
tests = ["pytest", "pytest-asyncio", "mypy (>=0.800)"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "async-timeout"
|
||||||
|
version = "3.0.1"
|
||||||
|
description = "Timeout context manager for asyncio programs"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.5.3"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "authlib"
|
||||||
|
version = "0.15.4"
|
||||||
|
description = "The ultimate Python library in building OAuth and OpenID Connect servers."
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = "*"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
cryptography = "*"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
client = ["requests"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "beautifulsoup4"
|
||||||
|
version = "4.10.0"
|
||||||
|
description = "Screen-scraping library"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">3.0.0"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
soupsieve = ">1.2"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
html5lib = ["html5lib"]
|
||||||
|
lxml = ["lxml"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "bs4"
|
||||||
|
version = "0.0.1"
|
||||||
|
description = "Dummy package for Beautiful Soup"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = "*"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
beautifulsoup4 = "*"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "bson"
|
||||||
|
version = "0.5.10"
|
||||||
|
description = "BSON codec for Python"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = "*"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
python-dateutil = ">=2.4.0"
|
||||||
|
six = ">=1.9.0"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "certifi"
|
||||||
|
version = "2021.10.8"
|
||||||
|
description = "Python package for providing Mozilla's CA Bundle."
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = "*"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "cffi"
|
||||||
|
version = "1.15.0"
|
||||||
|
description = "Foreign Function Interface for Python calling C code."
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = "*"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
pycparser = "*"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "charset-normalizer"
|
||||||
|
version = "2.0.7"
|
||||||
|
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.5.0"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
unicode_backport = ["unicodedata2"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "click"
|
||||||
|
version = "8.0.3"
|
||||||
|
description = "Composable command line interface toolkit"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.6"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
colorama = {version = "*", markers = "platform_system == \"Windows\""}
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "colorama"
|
||||||
|
version = "0.4.4"
|
||||||
|
description = "Cross-platform colored terminal text."
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "cryptography"
|
||||||
|
version = "35.0.0"
|
||||||
|
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.6"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
cffi = ">=1.12"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"]
|
||||||
|
docstest = ["doc8", "pyenchant (>=1.6.11)", "twine (>=1.12.0)", "sphinxcontrib-spelling (>=4.0.1)"]
|
||||||
|
pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"]
|
||||||
|
sdist = ["setuptools_rust (>=0.11.4)"]
|
||||||
|
ssh = ["bcrypt (>=3.1.5)"]
|
||||||
|
test = ["pytest (>=6.2.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pretend", "iso8601", "pytz", "hypothesis (>=1.11.4,!=3.79.2)"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "graphql-core"
|
||||||
|
version = "3.1.6"
|
||||||
|
description = "GraphQL implementation for Python, a port of GraphQL.js, the JavaScript reference implementation for GraphQL."
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.6,<4"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "greenlet"
|
||||||
|
version = "1.1.2"
|
||||||
|
description = "Lightweight in-process concurrent programming"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
docs = ["sphinx"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "h11"
|
||||||
|
version = "0.12.0"
|
||||||
|
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.6"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "html2text"
|
||||||
|
version = "2020.1.16"
|
||||||
|
description = "Turn HTML into equivalent Markdown-structured text."
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.5"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "httpcore"
|
||||||
|
version = "0.13.7"
|
||||||
|
description = "A minimal low-level HTTP client."
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.6"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
anyio = ">=3.0.0,<4.0.0"
|
||||||
|
h11 = ">=0.11,<0.13"
|
||||||
|
sniffio = ">=1.0.0,<2.0.0"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
http2 = ["h2 (>=3,<5)"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "httpx"
|
||||||
|
version = "0.20.0"
|
||||||
|
description = "The next generation HTTP client."
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.6"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
certifi = "*"
|
||||||
|
charset-normalizer = "*"
|
||||||
|
httpcore = ">=0.13.3,<0.14.0"
|
||||||
|
rfc3986 = {version = ">=1.3,<2", extras = ["idna2008"]}
|
||||||
|
sniffio = "*"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
brotli = ["brotlicffi", "brotli"]
|
||||||
|
cli = ["click (>=8.0.0,<9.0.0)", "rich (>=10.0.0,<11.0.0)", "pygments (>=2.0.0,<3.0.0)"]
|
||||||
|
http2 = ["h2 (>=3,<5)"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "idna"
|
||||||
|
version = "3.3"
|
||||||
|
description = "Internationalized Domain Names in Applications (IDNA)"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.5"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "itsdangerous"
|
||||||
|
version = "2.0.1"
|
||||||
|
description = "Safely pass data to untrusted environments and back."
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.6"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "passlib"
|
||||||
|
version = "1.7.4"
|
||||||
|
description = "comprehensive password hashing framework supporting over 30 schemes"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = "*"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
argon2 = ["argon2-cffi (>=18.2.0)"]
|
||||||
|
bcrypt = ["bcrypt (>=3.1.0)"]
|
||||||
|
build_docs = ["sphinx (>=1.6)", "sphinxcontrib-fulltoc (>=1.2.0)", "cloud-sptheme (>=1.10.1)"]
|
||||||
|
totp = ["cryptography"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "psycopg2"
|
||||||
|
version = "2.9.1"
|
||||||
|
description = "psycopg2 - Python-PostgreSQL Database Adapter"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.6"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "psycopg2-binary"
|
||||||
|
version = "2.9.1"
|
||||||
|
description = "psycopg2 - Python-PostgreSQL Database Adapter"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.6"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pycparser"
|
||||||
|
version = "2.20"
|
||||||
|
description = "C parser in Python"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pydantic"
|
||||||
|
version = "1.8.2"
|
||||||
|
description = "Data validation and settings management using python 3.6 type hinting"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.6.1"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
typing-extensions = ">=3.7.4.3"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
dotenv = ["python-dotenv (>=0.10.4)"]
|
||||||
|
email = ["email-validator (>=1.0.3)"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pyjwt"
|
||||||
|
version = "2.2.0"
|
||||||
|
description = "JSON Web Token implementation in Python"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.6"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
crypto = ["cryptography (>=3.3.1)"]
|
||||||
|
dev = ["sphinx", "sphinx-rtd-theme", "zope.interface", "cryptography (>=3.3.1)", "pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)", "mypy", "pre-commit"]
|
||||||
|
docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"]
|
||||||
|
tests = ["pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "python-dateutil"
|
||||||
|
version = "2.8.2"
|
||||||
|
description = "Extensions to the standard Python datetime module"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
six = ">=1.5"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "python-frontmatter"
|
||||||
|
version = "1.0.0"
|
||||||
|
description = "Parse and manage posts with YAML (or other) frontmatter"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = "*"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
PyYAML = "*"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
docs = ["sphinx"]
|
||||||
|
test = ["pytest", "toml", "pyaml"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pyyaml"
|
||||||
|
version = "6.0"
|
||||||
|
description = "YAML parser and emitter for Python"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.6"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "requests"
|
||||||
|
version = "2.26.0"
|
||||||
|
description = "Python HTTP for Humans."
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
certifi = ">=2017.4.17"
|
||||||
|
charset-normalizer = {version = ">=2.0.0,<2.1.0", markers = "python_version >= \"3\""}
|
||||||
|
idna = {version = ">=2.5,<4", markers = "python_version >= \"3\""}
|
||||||
|
urllib3 = ">=1.21.1,<1.27"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"]
|
||||||
|
use_chardet_on_py3 = ["chardet (>=3.0.2,<5)"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rfc3986"
|
||||||
|
version = "1.5.0"
|
||||||
|
description = "Validating URI References per RFC 3986"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = "*"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
idna = {version = "*", optional = true, markers = "extra == \"idna2008\""}
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
idna2008 = ["idna"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "six"
|
||||||
|
version = "1.16.0"
|
||||||
|
description = "Python 2 and 3 compatibility utilities"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "sniffio"
|
||||||
|
version = "1.2.0"
|
||||||
|
description = "Sniff out which async library your code is running under"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.5"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "soupsieve"
|
||||||
|
version = "2.2.1"
|
||||||
|
description = "A modern CSS selector implementation for Beautiful Soup."
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.6"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "sqlalchemy"
|
||||||
|
version = "1.4.25"
|
||||||
|
description = "Database Abstraction Library"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"}
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
aiomysql = ["greenlet (!=0.4.17)", "aiomysql"]
|
||||||
|
aiosqlite = ["typing_extensions (!=3.10.0.1)", "greenlet (!=0.4.17)", "aiosqlite"]
|
||||||
|
asyncio = ["greenlet (!=0.4.17)"]
|
||||||
|
asyncmy = ["greenlet (!=0.4.17)", "asyncmy (>=0.2.0)"]
|
||||||
|
mariadb_connector = ["mariadb (>=1.0.1)"]
|
||||||
|
mssql = ["pyodbc"]
|
||||||
|
mssql_pymssql = ["pymssql"]
|
||||||
|
mssql_pyodbc = ["pyodbc"]
|
||||||
|
mypy = ["sqlalchemy2-stubs", "mypy (>=0.910)"]
|
||||||
|
mysql = ["mysqlclient (>=1.4.0,<2)", "mysqlclient (>=1.4.0)"]
|
||||||
|
mysql_connector = ["mysql-connector-python"]
|
||||||
|
oracle = ["cx_oracle (>=7,<8)", "cx_oracle (>=7)"]
|
||||||
|
postgresql = ["psycopg2 (>=2.7)"]
|
||||||
|
postgresql_asyncpg = ["greenlet (!=0.4.17)", "asyncpg"]
|
||||||
|
postgresql_pg8000 = ["pg8000 (>=1.16.6)"]
|
||||||
|
postgresql_psycopg2binary = ["psycopg2-binary"]
|
||||||
|
postgresql_psycopg2cffi = ["psycopg2cffi"]
|
||||||
|
pymysql = ["pymysql (<1)", "pymysql"]
|
||||||
|
sqlcipher = ["sqlcipher3-binary"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "starlette"
|
||||||
|
version = "0.14.2"
|
||||||
|
description = "The little ASGI library that shines."
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=3.6"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
full = ["aiofiles", "graphene", "itsdangerous", "jinja2", "python-multipart", "pyyaml", "requests"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "transliterate"
|
||||||
|
version = "1.10.2"
|
||||||
|
description = "Bi-directional transliterator for Python"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = "*"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
six = ">=1.1.0"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "typing-extensions"
|
||||||
|
version = "3.10.0.2"
|
||||||
|
description = "Backported and Experimental Type Hints for Python 3.5+"
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = "*"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "urllib3"
|
||||||
|
version = "1.26.7"
|
||||||
|
description = "HTTP library with thread-safe connection pooling, file post, and more."
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
brotli = ["brotlipy (>=0.6.0)"]
|
||||||
|
secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"]
|
||||||
|
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "uvicorn"
|
||||||
|
version = "0.15.0"
|
||||||
|
description = "The lightning-fast ASGI server."
|
||||||
|
category = "main"
|
||||||
|
optional = false
|
||||||
|
python-versions = "*"
|
||||||
|
|
||||||
|
[package.dependencies]
|
||||||
|
asgiref = ">=3.4.0"
|
||||||
|
click = ">=7.0"
|
||||||
|
h11 = ">=0.8"
|
||||||
|
|
||||||
|
[package.extras]
|
||||||
|
standard = ["websockets (>=9.1)", "httptools (>=0.2.0,<0.3.0)", "watchgod (>=0.6)", "python-dotenv (>=0.13)", "PyYAML (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "colorama (>=0.4)"]
|
||||||
|
|
||||||
|
[metadata]
|
||||||
|
lock-version = "1.1"
|
||||||
|
python-versions = "^3.9"
|
||||||
|
content-hash = "54e5e392e1885a78c1ef529d0443bee62009835e8d24884a7fdf539669a2fd17"
|
||||||
|
|
||||||
|
[metadata.files]
|
||||||
|
aioredis = [
|
||||||
|
{file = "aioredis-2.0.0-py3-none-any.whl", hash = "sha256:9921d68a3df5c5cdb0d5b49ad4fc88a4cfdd60c108325df4f0066e8410c55ffb"},
|
||||||
|
{file = "aioredis-2.0.0.tar.gz", hash = "sha256:3a2de4b614e6a5f8e104238924294dc4e811aefbe17ddf52c04a93cbf06e67db"},
|
||||||
|
]
|
||||||
|
anyio = [
|
||||||
|
{file = "anyio-3.3.3-py3-none-any.whl", hash = "sha256:56ceaeed2877723578b1341f4f68c29081db189cfb40a97d1922b9513f6d7db6"},
|
||||||
|
{file = "anyio-3.3.3.tar.gz", hash = "sha256:8eccec339cb4a856c94a75d50fc1d451faf32a05ef406be462e2efc59c9838b0"},
|
||||||
|
]
|
||||||
|
ariadne = [
|
||||||
|
{file = "ariadne-0.13.0-py3-none-any.whl", hash = "sha256:56bc3609a0512920f06e9312f8ea6db3c8e4a7cd77f31fbed388f5dba6d589c0"},
|
||||||
|
{file = "ariadne-0.13.0.tar.gz", hash = "sha256:e00abd7eb5869b59a638f1e3a7743445bf387236048cf1b0eb9d7c506dcd37c5"},
|
||||||
|
]
|
||||||
|
asgiref = [
|
||||||
|
{file = "asgiref-3.4.1-py3-none-any.whl", hash = "sha256:ffc141aa908e6f175673e7b1b3b7af4fdb0ecb738fc5c8b88f69f055c2415214"},
|
||||||
|
{file = "asgiref-3.4.1.tar.gz", hash = "sha256:4ef1ab46b484e3c706329cedeff284a5d40824200638503f5768edb6de7d58e9"},
|
||||||
|
]
|
||||||
|
async-timeout = [
|
||||||
|
{file = "async-timeout-3.0.1.tar.gz", hash = "sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f"},
|
||||||
|
{file = "async_timeout-3.0.1-py3-none-any.whl", hash = "sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3"},
|
||||||
|
]
|
||||||
|
authlib = [
|
||||||
|
{file = "Authlib-0.15.4-py2.py3-none-any.whl", hash = "sha256:d9fe5edb59801b16583faa86f88d798d99d952979b9616d5c735b9170b41ae2c"},
|
||||||
|
{file = "Authlib-0.15.4.tar.gz", hash = "sha256:37df3a2554bc6fe0da3cc6848c44fac2ae40634a7f8fc72543947f4330b26464"},
|
||||||
|
]
|
||||||
|
beautifulsoup4 = [
|
||||||
|
{file = "beautifulsoup4-4.10.0-py3-none-any.whl", hash = "sha256:9a315ce70049920ea4572a4055bc4bd700c940521d36fc858205ad4fcde149bf"},
|
||||||
|
{file = "beautifulsoup4-4.10.0.tar.gz", hash = "sha256:c23ad23c521d818955a4151a67d81580319d4bf548d3d49f4223ae041ff98891"},
|
||||||
|
]
|
||||||
|
bs4 = [
|
||||||
|
{file = "bs4-0.0.1.tar.gz", hash = "sha256:36ecea1fd7cc5c0c6e4a1ff075df26d50da647b75376626cc186e2212886dd3a"},
|
||||||
|
]
|
||||||
|
bson = [
|
||||||
|
{file = "bson-0.5.10.tar.gz", hash = "sha256:d6511b2ab051139a9123c184de1a04227262173ad593429d21e443d6462d6590"},
|
||||||
|
]
|
||||||
|
certifi = [
|
||||||
|
{file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"},
|
||||||
|
{file = "certifi-2021.10.8.tar.gz", hash = "sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"},
|
||||||
|
]
|
||||||
|
cffi = [
|
||||||
|
{file = "cffi-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962"},
|
||||||
|
{file = "cffi-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:23cfe892bd5dd8941608f93348c0737e369e51c100d03718f108bf1add7bd6d0"},
|
||||||
|
{file = "cffi-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:41d45de54cd277a7878919867c0f08b0cf817605e4eb94093e7516505d3c8d14"},
|
||||||
|
{file = "cffi-1.15.0-cp27-cp27m-win32.whl", hash = "sha256:4a306fa632e8f0928956a41fa8e1d6243c71e7eb59ffbd165fc0b41e316b2474"},
|
||||||
|
{file = "cffi-1.15.0-cp27-cp27m-win_amd64.whl", hash = "sha256:e7022a66d9b55e93e1a845d8c9eba2a1bebd4966cd8bfc25d9cd07d515b33fa6"},
|
||||||
|
{file = "cffi-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:14cd121ea63ecdae71efa69c15c5543a4b5fbcd0bbe2aad864baca0063cecf27"},
|
||||||
|
{file = "cffi-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:d4d692a89c5cf08a8557fdeb329b82e7bf609aadfaed6c0d79f5a449a3c7c023"},
|
||||||
|
{file = "cffi-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0104fb5ae2391d46a4cb082abdd5c69ea4eab79d8d44eaaf79f1b1fd806ee4c2"},
|
||||||
|
{file = "cffi-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:91ec59c33514b7c7559a6acda53bbfe1b283949c34fe7440bcf917f96ac0723e"},
|
||||||
|
{file = "cffi-1.15.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f5c7150ad32ba43a07c4479f40241756145a1f03b43480e058cfd862bf5041c7"},
|
||||||
|
{file = "cffi-1.15.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:00c878c90cb53ccfaae6b8bc18ad05d2036553e6d9d1d9dbcf323bbe83854ca3"},
|
||||||
|
{file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abb9a20a72ac4e0fdb50dae135ba5e77880518e742077ced47eb1499e29a443c"},
|
||||||
|
{file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a5263e363c27b653a90078143adb3d076c1a748ec9ecc78ea2fb916f9b861962"},
|
||||||
|
{file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f54a64f8b0c8ff0b64d18aa76675262e1700f3995182267998c31ae974fbc382"},
|
||||||
|
{file = "cffi-1.15.0-cp310-cp310-win32.whl", hash = "sha256:c21c9e3896c23007803a875460fb786118f0cdd4434359577ea25eb556e34c55"},
|
||||||
|
{file = "cffi-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:5e069f72d497312b24fcc02073d70cb989045d1c91cbd53979366077959933e0"},
|
||||||
|
{file = "cffi-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:64d4ec9f448dfe041705426000cc13e34e6e5bb13736e9fd62e34a0b0c41566e"},
|
||||||
|
{file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2756c88cbb94231c7a147402476be2c4df2f6078099a6f4a480d239a8817ae39"},
|
||||||
|
{file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b96a311ac60a3f6be21d2572e46ce67f09abcf4d09344c49274eb9e0bf345fc"},
|
||||||
|
{file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75e4024375654472cc27e91cbe9eaa08567f7fbdf822638be2814ce059f58032"},
|
||||||
|
{file = "cffi-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:59888172256cac5629e60e72e86598027aca6bf01fa2465bdb676d37636573e8"},
|
||||||
|
{file = "cffi-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:27c219baf94952ae9d50ec19651a687b826792055353d07648a5695413e0c605"},
|
||||||
|
{file = "cffi-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:4958391dbd6249d7ad855b9ca88fae690783a6be9e86df65865058ed81fc860e"},
|
||||||
|
{file = "cffi-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:f6f824dc3bce0edab5f427efcfb1d63ee75b6fcb7282900ccaf925be84efb0fc"},
|
||||||
|
{file = "cffi-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:06c48159c1abed75c2e721b1715c379fa3200c7784271b3c46df01383b593636"},
|
||||||
|
{file = "cffi-1.15.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c2051981a968d7de9dd2d7b87bcb9c939c74a34626a6e2f8181455dd49ed69e4"},
|
||||||
|
{file = "cffi-1.15.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:fd8a250edc26254fe5b33be00402e6d287f562b6a5b2152dec302fa15bb3e997"},
|
||||||
|
{file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91d77d2a782be4274da750752bb1650a97bfd8f291022b379bb8e01c66b4e96b"},
|
||||||
|
{file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:45db3a33139e9c8f7c09234b5784a5e33d31fd6907800b316decad50af323ff2"},
|
||||||
|
{file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:263cc3d821c4ab2213cbe8cd8b355a7f72a8324577dc865ef98487c1aeee2bc7"},
|
||||||
|
{file = "cffi-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:17771976e82e9f94976180f76468546834d22a7cc404b17c22df2a2c81db0c66"},
|
||||||
|
{file = "cffi-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:3415c89f9204ee60cd09b235810be700e993e343a408693e80ce7f6a40108029"},
|
||||||
|
{file = "cffi-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4238e6dab5d6a8ba812de994bbb0a79bddbdf80994e4ce802b6f6f3142fcc880"},
|
||||||
|
{file = "cffi-1.15.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0808014eb713677ec1292301ea4c81ad277b6cdf2fdd90fd540af98c0b101d20"},
|
||||||
|
{file = "cffi-1.15.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:57e9ac9ccc3101fac9d6014fba037473e4358ef4e89f8e181f8951a2c0162024"},
|
||||||
|
{file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b6c2ea03845c9f501ed1313e78de148cd3f6cad741a75d43a29b43da27f2e1e"},
|
||||||
|
{file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10dffb601ccfb65262a27233ac273d552ddc4d8ae1bf93b21c94b8511bffe728"},
|
||||||
|
{file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:786902fb9ba7433aae840e0ed609f45c7bcd4e225ebb9c753aa39725bb3e6ad6"},
|
||||||
|
{file = "cffi-1.15.0-cp38-cp38-win32.whl", hash = "sha256:da5db4e883f1ce37f55c667e5c0de439df76ac4cb55964655906306918e7363c"},
|
||||||
|
{file = "cffi-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:181dee03b1170ff1969489acf1c26533710231c58f95534e3edac87fff06c443"},
|
||||||
|
{file = "cffi-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:45e8636704eacc432a206ac7345a5d3d2c62d95a507ec70d62f23cd91770482a"},
|
||||||
|
{file = "cffi-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:31fb708d9d7c3f49a60f04cf5b119aeefe5644daba1cd2a0fe389b674fd1de37"},
|
||||||
|
{file = "cffi-1.15.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6dc2737a3674b3e344847c8686cf29e500584ccad76204efea14f451d4cc669a"},
|
||||||
|
{file = "cffi-1.15.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74fdfdbfdc48d3f47148976f49fab3251e550a8720bebc99bf1483f5bfb5db3e"},
|
||||||
|
{file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffaa5c925128e29efbde7301d8ecaf35c8c60ffbcd6a1ffd3a552177c8e5e796"},
|
||||||
|
{file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f7d084648d77af029acb79a0ff49a0ad7e9d09057a9bf46596dac9514dc07df"},
|
||||||
|
{file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef1f279350da2c586a69d32fc8733092fd32cc8ac95139a00377841f59a3f8d8"},
|
||||||
|
{file = "cffi-1.15.0-cp39-cp39-win32.whl", hash = "sha256:2a23af14f408d53d5e6cd4e3d9a24ff9e05906ad574822a10563efcef137979a"},
|
||||||
|
{file = "cffi-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:3773c4d81e6e818df2efbc7dd77325ca0dcb688116050fb2b3011218eda36139"},
|
||||||
|
{file = "cffi-1.15.0.tar.gz", hash = "sha256:920f0d66a896c2d99f0adbb391f990a84091179542c205fa53ce5787aff87954"},
|
||||||
|
]
|
||||||
|
charset-normalizer = [
|
||||||
|
{file = "charset-normalizer-2.0.7.tar.gz", hash = "sha256:e019de665e2bcf9c2b64e2e5aa025fa991da8720daa3c1138cadd2fd1856aed0"},
|
||||||
|
{file = "charset_normalizer-2.0.7-py3-none-any.whl", hash = "sha256:f7af805c321bfa1ce6714c51f254e0d5bb5e5834039bc17db7ebe3a4cec9492b"},
|
||||||
|
]
|
||||||
|
click = [
|
||||||
|
{file = "click-8.0.3-py3-none-any.whl", hash = "sha256:353f466495adaeb40b6b5f592f9f91cb22372351c84caeb068132442a4518ef3"},
|
||||||
|
{file = "click-8.0.3.tar.gz", hash = "sha256:410e932b050f5eed773c4cda94de75971c89cdb3155a72a0831139a79e5ecb5b"},
|
||||||
|
]
|
||||||
|
colorama = [
|
||||||
|
{file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"},
|
||||||
|
{file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"},
|
||||||
|
]
|
||||||
|
cryptography = [
|
||||||
|
{file = "cryptography-35.0.0-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:d57e0cdc1b44b6cdf8af1d01807db06886f10177469312fbde8f44ccbb284bc9"},
|
||||||
|
{file = "cryptography-35.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:ced40344e811d6abba00295ced98c01aecf0c2de39481792d87af4fa58b7b4d6"},
|
||||||
|
{file = "cryptography-35.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:54b2605e5475944e2213258e0ab8696f4f357a31371e538ef21e8d61c843c28d"},
|
||||||
|
{file = "cryptography-35.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:7b7ceeff114c31f285528ba8b390d3e9cfa2da17b56f11d366769a807f17cbaa"},
|
||||||
|
{file = "cryptography-35.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d69645f535f4b2c722cfb07a8eab916265545b3475fdb34e0be2f4ee8b0b15e"},
|
||||||
|
{file = "cryptography-35.0.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a2d0e0acc20ede0f06ef7aa58546eee96d2592c00f450c9acb89c5879b61992"},
|
||||||
|
{file = "cryptography-35.0.0-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:07bb7fbfb5de0980590ddfc7f13081520def06dc9ed214000ad4372fb4e3c7f6"},
|
||||||
|
{file = "cryptography-35.0.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:7eba2cebca600a7806b893cb1d541a6e910afa87e97acf2021a22b32da1df52d"},
|
||||||
|
{file = "cryptography-35.0.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:18d90f4711bf63e2fb21e8c8e51ed8189438e6b35a6d996201ebd98a26abbbe6"},
|
||||||
|
{file = "cryptography-35.0.0-cp36-abi3-win32.whl", hash = "sha256:c10c797ac89c746e488d2ee92bd4abd593615694ee17b2500578b63cad6b93a8"},
|
||||||
|
{file = "cryptography-35.0.0-cp36-abi3-win_amd64.whl", hash = "sha256:7075b304cd567694dc692ffc9747f3e9cb393cc4aa4fb7b9f3abd6f5c4e43588"},
|
||||||
|
{file = "cryptography-35.0.0-pp36-pypy36_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a688ebcd08250eab5bb5bca318cc05a8c66de5e4171a65ca51db6bd753ff8953"},
|
||||||
|
{file = "cryptography-35.0.0-pp36-pypy36_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d99915d6ab265c22873f1b4d6ea5ef462ef797b4140be4c9d8b179915e0985c6"},
|
||||||
|
{file = "cryptography-35.0.0-pp36-pypy36_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:928185a6d1ccdb816e883f56ebe92e975a262d31cc536429041921f8cb5a62fd"},
|
||||||
|
{file = "cryptography-35.0.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ebeddd119f526bcf323a89f853afb12e225902a24d29b55fe18dd6fcb2838a76"},
|
||||||
|
{file = "cryptography-35.0.0-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:22a38e96118a4ce3b97509443feace1d1011d0571fae81fc3ad35f25ba3ea999"},
|
||||||
|
{file = "cryptography-35.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb80e8a1f91e4b7ef8b33041591e6d89b2b8e122d787e87eeb2b08da71bb16ad"},
|
||||||
|
{file = "cryptography-35.0.0-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:abb5a361d2585bb95012a19ed9b2c8f412c5d723a9836418fab7aaa0243e67d2"},
|
||||||
|
{file = "cryptography-35.0.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:1ed82abf16df40a60942a8c211251ae72858b25b7421ce2497c2eb7a1cee817c"},
|
||||||
|
{file = "cryptography-35.0.0.tar.gz", hash = "sha256:9933f28f70d0517686bd7de36166dda42094eac49415459d9bdf5e7df3e0086d"},
|
||||||
|
]
|
||||||
|
graphql-core = [
|
||||||
|
{file = "graphql-core-3.1.6.tar.gz", hash = "sha256:e65975b6a13878f9113a1fa5320760585b522d139944e005936b1b8358d0651a"},
|
||||||
|
{file = "graphql_core-3.1.6-py3-none-any.whl", hash = "sha256:c78d09596d347e1cffd266c5384abfedf43ed1eae08729773bebb3d527fe5a14"},
|
||||||
|
]
|
||||||
|
greenlet = [
|
||||||
|
{file = "greenlet-1.1.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:58df5c2a0e293bf665a51f8a100d3e9956febfbf1d9aaf8c0677cf70218910c6"},
|
||||||
|
{file = "greenlet-1.1.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:aec52725173bd3a7b56fe91bc56eccb26fbdff1386ef123abb63c84c5b43b63a"},
|
||||||
|
{file = "greenlet-1.1.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:833e1551925ed51e6b44c800e71e77dacd7e49181fdc9ac9a0bf3714d515785d"},
|
||||||
|
{file = "greenlet-1.1.2-cp27-cp27m-win32.whl", hash = "sha256:aa5b467f15e78b82257319aebc78dd2915e4c1436c3c0d1ad6f53e47ba6e2713"},
|
||||||
|
{file = "greenlet-1.1.2-cp27-cp27m-win_amd64.whl", hash = "sha256:40b951f601af999a8bf2ce8c71e8aaa4e8c6f78ff8afae7b808aae2dc50d4c40"},
|
||||||
|
{file = "greenlet-1.1.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:95e69877983ea39b7303570fa6760f81a3eec23d0e3ab2021b7144b94d06202d"},
|
||||||
|
{file = "greenlet-1.1.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:356b3576ad078c89a6107caa9c50cc14e98e3a6c4874a37c3e0273e4baf33de8"},
|
||||||
|
{file = "greenlet-1.1.2-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:8639cadfda96737427330a094476d4c7a56ac03de7265622fcf4cfe57c8ae18d"},
|
||||||
|
{file = "greenlet-1.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97e5306482182170ade15c4b0d8386ded995a07d7cc2ca8f27958d34d6736497"},
|
||||||
|
{file = "greenlet-1.1.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6a36bb9474218c7a5b27ae476035497a6990e21d04c279884eb10d9b290f1b1"},
|
||||||
|
{file = "greenlet-1.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abb7a75ed8b968f3061327c433a0fbd17b729947b400747c334a9c29a9af6c58"},
|
||||||
|
{file = "greenlet-1.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:14d4f3cd4e8b524ae9b8aa567858beed70c392fdec26dbdb0a8a418392e71708"},
|
||||||
|
{file = "greenlet-1.1.2-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:17ff94e7a83aa8671a25bf5b59326ec26da379ace2ebc4411d690d80a7fbcf23"},
|
||||||
|
{file = "greenlet-1.1.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9f3cba480d3deb69f6ee2c1825060177a22c7826431458c697df88e6aeb3caee"},
|
||||||
|
{file = "greenlet-1.1.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:fa877ca7f6b48054f847b61d6fa7bed5cebb663ebc55e018fda12db09dcc664c"},
|
||||||
|
{file = "greenlet-1.1.2-cp35-cp35m-win32.whl", hash = "sha256:7cbd7574ce8e138bda9df4efc6bf2ab8572c9aff640d8ecfece1b006b68da963"},
|
||||||
|
{file = "greenlet-1.1.2-cp35-cp35m-win_amd64.whl", hash = "sha256:903bbd302a2378f984aef528f76d4c9b1748f318fe1294961c072bdc7f2ffa3e"},
|
||||||
|
{file = "greenlet-1.1.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:049fe7579230e44daef03a259faa24511d10ebfa44f69411d99e6a184fe68073"},
|
||||||
|
{file = "greenlet-1.1.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:dd0b1e9e891f69e7675ba5c92e28b90eaa045f6ab134ffe70b52e948aa175b3c"},
|
||||||
|
{file = "greenlet-1.1.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:7418b6bfc7fe3331541b84bb2141c9baf1ec7132a7ecd9f375912eca810e714e"},
|
||||||
|
{file = "greenlet-1.1.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9d29ca8a77117315101425ec7ec2a47a22ccf59f5593378fc4077ac5b754fce"},
|
||||||
|
{file = "greenlet-1.1.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:21915eb821a6b3d9d8eefdaf57d6c345b970ad722f856cd71739493ce003ad08"},
|
||||||
|
{file = "greenlet-1.1.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eff9d20417ff9dcb0d25e2defc2574d10b491bf2e693b4e491914738b7908168"},
|
||||||
|
{file = "greenlet-1.1.2-cp36-cp36m-win32.whl", hash = "sha256:32ca72bbc673adbcfecb935bb3fb1b74e663d10a4b241aaa2f5a75fe1d1f90aa"},
|
||||||
|
{file = "greenlet-1.1.2-cp36-cp36m-win_amd64.whl", hash = "sha256:f0214eb2a23b85528310dad848ad2ac58e735612929c8072f6093f3585fd342d"},
|
||||||
|
{file = "greenlet-1.1.2-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:b92e29e58bef6d9cfd340c72b04d74c4b4e9f70c9fa7c78b674d1fec18896dc4"},
|
||||||
|
{file = "greenlet-1.1.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:fdcec0b8399108577ec290f55551d926d9a1fa6cad45882093a7a07ac5ec147b"},
|
||||||
|
{file = "greenlet-1.1.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:93f81b134a165cc17123626ab8da2e30c0455441d4ab5576eed73a64c025b25c"},
|
||||||
|
{file = "greenlet-1.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e12bdc622676ce47ae9abbf455c189e442afdde8818d9da983085df6312e7a1"},
|
||||||
|
{file = "greenlet-1.1.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c790abda465726cfb8bb08bd4ca9a5d0a7bd77c7ac1ca1b839ad823b948ea28"},
|
||||||
|
{file = "greenlet-1.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f276df9830dba7a333544bd41070e8175762a7ac20350786b322b714b0e654f5"},
|
||||||
|
{file = "greenlet-1.1.2-cp37-cp37m-win32.whl", hash = "sha256:64e6175c2e53195278d7388c454e0b30997573f3f4bd63697f88d855f7a6a1fc"},
|
||||||
|
{file = "greenlet-1.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:b11548073a2213d950c3f671aa88e6f83cda6e2fb97a8b6317b1b5b33d850e06"},
|
||||||
|
{file = "greenlet-1.1.2-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:9633b3034d3d901f0a46b7939f8c4d64427dfba6bbc5a36b1a67364cf148a1b0"},
|
||||||
|
{file = "greenlet-1.1.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:eb6ea6da4c787111adf40f697b4e58732ee0942b5d3bd8f435277643329ba627"},
|
||||||
|
{file = "greenlet-1.1.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:f3acda1924472472ddd60c29e5b9db0cec629fbe3c5c5accb74d6d6d14773478"},
|
||||||
|
{file = "greenlet-1.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e859fcb4cbe93504ea18008d1df98dee4f7766db66c435e4882ab35cf70cac43"},
|
||||||
|
{file = "greenlet-1.1.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00e44c8afdbe5467e4f7b5851be223be68adb4272f44696ee71fe46b7036a711"},
|
||||||
|
{file = "greenlet-1.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec8c433b3ab0419100bd45b47c9c8551248a5aee30ca5e9d399a0b57ac04651b"},
|
||||||
|
{file = "greenlet-1.1.2-cp38-cp38-win32.whl", hash = "sha256:288c6a76705dc54fba69fbcb59904ae4ad768b4c768839b8ca5fdadec6dd8cfd"},
|
||||||
|
{file = "greenlet-1.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:8d2f1fb53a421b410751887eb4ff21386d119ef9cde3797bf5e7ed49fb51a3b3"},
|
||||||
|
{file = "greenlet-1.1.2-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:166eac03e48784a6a6e0e5f041cfebb1ab400b394db188c48b3a84737f505b67"},
|
||||||
|
{file = "greenlet-1.1.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:572e1787d1460da79590bf44304abbc0a2da944ea64ec549188fa84d89bba7ab"},
|
||||||
|
{file = "greenlet-1.1.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:be5f425ff1f5f4b3c1e33ad64ab994eed12fc284a6ea71c5243fd564502ecbe5"},
|
||||||
|
{file = "greenlet-1.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1692f7d6bc45e3200844be0dba153612103db241691088626a33ff1f24a0d88"},
|
||||||
|
{file = "greenlet-1.1.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7227b47e73dedaa513cdebb98469705ef0d66eb5a1250144468e9c3097d6b59b"},
|
||||||
|
{file = "greenlet-1.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ff61ff178250f9bb3cd89752df0f1dd0e27316a8bd1465351652b1b4a4cdfd3"},
|
||||||
|
{file = "greenlet-1.1.2-cp39-cp39-win32.whl", hash = "sha256:f70a9e237bb792c7cc7e44c531fd48f5897961701cdaa06cf22fc14965c496cf"},
|
||||||
|
{file = "greenlet-1.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:013d61294b6cd8fe3242932c1c5e36e5d1db2c8afb58606c5a67efce62c1f5fd"},
|
||||||
|
{file = "greenlet-1.1.2.tar.gz", hash = "sha256:e30f5ea4ae2346e62cedde8794a56858a67b878dd79f7df76a0767e356b1744a"},
|
||||||
|
]
|
||||||
|
h11 = [
|
||||||
|
{file = "h11-0.12.0-py3-none-any.whl", hash = "sha256:36a3cb8c0a032f56e2da7084577878a035d3b61d104230d4bd49c0c6b555a9c6"},
|
||||||
|
{file = "h11-0.12.0.tar.gz", hash = "sha256:47222cb6067e4a307d535814917cd98fd0a57b6788ce715755fa2b6c28b56042"},
|
||||||
|
]
|
||||||
|
html2text = [
|
||||||
|
{file = "html2text-2020.1.16-py3-none-any.whl", hash = "sha256:c7c629882da0cf377d66f073329ccf34a12ed2adf0169b9285ae4e63ef54c82b"},
|
||||||
|
{file = "html2text-2020.1.16.tar.gz", hash = "sha256:e296318e16b059ddb97f7a8a1d6a5c1d7af4544049a01e261731d2d5cc277bbb"},
|
||||||
|
]
|
||||||
|
httpcore = [
|
||||||
|
{file = "httpcore-0.13.7-py3-none-any.whl", hash = "sha256:369aa481b014cf046f7067fddd67d00560f2f00426e79569d99cb11245134af0"},
|
||||||
|
{file = "httpcore-0.13.7.tar.gz", hash = "sha256:036f960468759e633574d7c121afba48af6419615d36ab8ede979f1ad6276fa3"},
|
||||||
|
]
|
||||||
|
httpx = [
|
||||||
|
{file = "httpx-0.20.0-py3-none-any.whl", hash = "sha256:33af5aad9bdc82ef1fc89219c1e36f5693bf9cd0ebe330884df563445682c0f8"},
|
||||||
|
{file = "httpx-0.20.0.tar.gz", hash = "sha256:09606d630f070d07f9ff28104fbcea429ea0014c1e89ac90b4d8de8286c40e7b"},
|
||||||
|
]
|
||||||
|
idna = [
|
||||||
|
{file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"},
|
||||||
|
{file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"},
|
||||||
|
]
|
||||||
|
itsdangerous = [
|
||||||
|
{file = "itsdangerous-2.0.1-py3-none-any.whl", hash = "sha256:5174094b9637652bdb841a3029700391451bd092ba3db90600dea710ba28e97c"},
|
||||||
|
{file = "itsdangerous-2.0.1.tar.gz", hash = "sha256:9e724d68fc22902a1435351f84c3fb8623f303fffcc566a4cb952df8c572cff0"},
|
||||||
|
]
|
||||||
|
passlib = [
|
||||||
|
{file = "passlib-1.7.4-py2.py3-none-any.whl", hash = "sha256:aa6bca462b8d8bda89c70b382f0c298a20b5560af6cbfa2dce410c0a2fb669f1"},
|
||||||
|
{file = "passlib-1.7.4.tar.gz", hash = "sha256:defd50f72b65c5402ab2c573830a6978e5f202ad0d984793c8dde2c4152ebe04"},
|
||||||
|
]
|
||||||
|
psycopg2 = [
|
||||||
|
{file = "psycopg2-2.9.1-cp36-cp36m-win32.whl", hash = "sha256:7f91312f065df517187134cce8e395ab37f5b601a42446bdc0f0d51773621854"},
|
||||||
|
{file = "psycopg2-2.9.1-cp36-cp36m-win_amd64.whl", hash = "sha256:830c8e8dddab6b6716a4bf73a09910c7954a92f40cf1d1e702fb93c8a919cc56"},
|
||||||
|
{file = "psycopg2-2.9.1-cp37-cp37m-win32.whl", hash = "sha256:89409d369f4882c47f7ea20c42c5046879ce22c1e4ea20ef3b00a4dfc0a7f188"},
|
||||||
|
{file = "psycopg2-2.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7640e1e4d72444ef012e275e7b53204d7fab341fb22bc76057ede22fe6860b25"},
|
||||||
|
{file = "psycopg2-2.9.1-cp38-cp38-win32.whl", hash = "sha256:079d97fc22de90da1d370c90583659a9f9a6ee4007355f5825e5f1c70dffc1fa"},
|
||||||
|
{file = "psycopg2-2.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:2c992196719fadda59f72d44603ee1a2fdcc67de097eea38d41c7ad9ad246e62"},
|
||||||
|
{file = "psycopg2-2.9.1-cp39-cp39-win32.whl", hash = "sha256:2087013c159a73e09713294a44d0c8008204d06326006b7f652bef5ace66eebb"},
|
||||||
|
{file = "psycopg2-2.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:bf35a25f1aaa8a3781195595577fcbb59934856ee46b4f252f56ad12b8043bcf"},
|
||||||
|
{file = "psycopg2-2.9.1.tar.gz", hash = "sha256:de5303a6f1d0a7a34b9d40e4d3bef684ccc44a49bbe3eb85e3c0bffb4a131b7c"},
|
||||||
|
]
|
||||||
|
psycopg2-binary = [
    {file = "psycopg2-binary-2.9.1.tar.gz", hash = "sha256:b0221ca5a9837e040ebf61f48899926b5783668b7807419e4adae8175a31f773"},
    {file = "psycopg2_binary-2.9.1-cp310-cp310-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:24b0b6688b9f31a911f2361fe818492650795c9e5d3a1bc647acbd7440142a4f"},
    {file = "psycopg2_binary-2.9.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:542875f62bc56e91c6eac05a0deadeae20e1730be4c6334d8f04c944fcd99759"},
    {file = "psycopg2_binary-2.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:661509f51531ec125e52357a489ea3806640d0ca37d9dada461ffc69ee1e7b6e"},
    {file = "psycopg2_binary-2.9.1-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:d92272c7c16e105788efe2cfa5d680f07e34e0c29b03c1908f8636f55d5f915a"},
    {file = "psycopg2_binary-2.9.1-cp310-cp310-manylinux_2_24_ppc64le.whl", hash = "sha256:736b8797b58febabb85494142c627bd182b50d2a7ec65322983e71065ad3034c"},
    {file = "psycopg2_binary-2.9.1-cp36-cp36m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:c250a7ec489b652c892e4f0a5d122cc14c3780f9f643e1a326754aedf82d9a76"},
    {file = "psycopg2_binary-2.9.1-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aef9aee84ec78af51107181d02fe8773b100b01c5dfde351184ad9223eab3698"},
    {file = "psycopg2_binary-2.9.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:123c3fb684e9abfc47218d3784c7b4c47c8587951ea4dd5bc38b6636ac57f616"},
    {file = "psycopg2_binary-2.9.1-cp36-cp36m-manylinux_2_24_aarch64.whl", hash = "sha256:995fc41ebda5a7a663a254a1dcac52638c3e847f48307b5416ee373da15075d7"},
    {file = "psycopg2_binary-2.9.1-cp36-cp36m-manylinux_2_24_ppc64le.whl", hash = "sha256:fbb42a541b1093385a2d8c7eec94d26d30437d0e77c1d25dae1dcc46741a385e"},
    {file = "psycopg2_binary-2.9.1-cp36-cp36m-win32.whl", hash = "sha256:20f1ab44d8c352074e2d7ca67dc00843067788791be373e67a0911998787ce7d"},
    {file = "psycopg2_binary-2.9.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f6fac64a38f6768e7bc7b035b9e10d8a538a9fadce06b983fb3e6fa55ac5f5ce"},
    {file = "psycopg2_binary-2.9.1-cp37-cp37m-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:1e3a362790edc0a365385b1ac4cc0acc429a0c0d662d829a50b6ce743ae61b5a"},
    {file = "psycopg2_binary-2.9.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f8559617b1fcf59a9aedba2c9838b5b6aa211ffedecabca412b92a1ff75aac1a"},
    {file = "psycopg2_binary-2.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a36c7eb6152ba5467fb264d73844877be8b0847874d4822b7cf2d3c0cb8cdcb0"},
    {file = "psycopg2_binary-2.9.1-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:2f62c207d1740b0bde5c4e949f857b044818f734a3d57f1d0d0edc65050532ed"},
    {file = "psycopg2_binary-2.9.1-cp37-cp37m-manylinux_2_24_ppc64le.whl", hash = "sha256:cfc523edecddaef56f6740d7de1ce24a2fdf94fd5e704091856a201872e37f9f"},
    {file = "psycopg2_binary-2.9.1-cp37-cp37m-win32.whl", hash = "sha256:1e85b74cbbb3056e3656f1cc4781294df03383127a8114cbc6531e8b8367bf1e"},
    {file = "psycopg2_binary-2.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:1473c0215b0613dd938db54a653f68251a45a78b05f6fc21af4326f40e8360a2"},
    {file = "psycopg2_binary-2.9.1-cp38-cp38-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:35c4310f8febe41f442d3c65066ca93cccefd75013df3d8c736c5b93ec288140"},
    {file = "psycopg2_binary-2.9.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c13d72ed6af7fd2c8acbd95661cf9477f94e381fce0792c04981a8283b52917"},
    {file = "psycopg2_binary-2.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14db1752acdd2187d99cb2ca0a1a6dfe57fc65c3281e0f20e597aac8d2a5bd90"},
    {file = "psycopg2_binary-2.9.1-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:aed4a9a7e3221b3e252c39d0bf794c438dc5453bc2963e8befe9d4cd324dff72"},
    {file = "psycopg2_binary-2.9.1-cp38-cp38-manylinux_2_24_ppc64le.whl", hash = "sha256:da113b70f6ec40e7d81b43d1b139b9db6a05727ab8be1ee559f3a69854a69d34"},
    {file = "psycopg2_binary-2.9.1-cp38-cp38-win32.whl", hash = "sha256:4235f9d5ddcab0b8dbd723dca56ea2922b485ea00e1dafacf33b0c7e840b3d32"},
    {file = "psycopg2_binary-2.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:988b47ac70d204aed01589ed342303da7c4d84b56c2f4c4b8b00deda123372bf"},
    {file = "psycopg2_binary-2.9.1-cp39-cp39-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:7360647ea04db2e7dff1648d1da825c8cf68dc5fbd80b8fb5b3ee9f068dcd21a"},
    {file = "psycopg2_binary-2.9.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca86db5b561b894f9e5f115d6a159fff2a2570a652e07889d8a383b5fae66eb4"},
    {file = "psycopg2_binary-2.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ced67f1e34e1a450cdb48eb53ca73b60aa0af21c46b9b35ac3e581cf9f00e31"},
    {file = "psycopg2_binary-2.9.1-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:0f2e04bd2a2ab54fa44ee67fe2d002bb90cee1c0f1cc0ebc3148af7b02034cbd"},
    {file = "psycopg2_binary-2.9.1-cp39-cp39-manylinux_2_24_ppc64le.whl", hash = "sha256:3242b9619de955ab44581a03a64bdd7d5e470cc4183e8fcadd85ab9d3756ce7a"},
    {file = "psycopg2_binary-2.9.1-cp39-cp39-win32.whl", hash = "sha256:0b7dae87f0b729922e06f85f667de7bf16455d411971b2043bbd9577af9d1975"},
    {file = "psycopg2_binary-2.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:b4d7679a08fea64573c969f6994a2631908bb2c0e69a7235648642f3d2e39a68"},
]
pycparser = [
    {file = "pycparser-2.20-py2.py3-none-any.whl", hash = "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705"},
    {file = "pycparser-2.20.tar.gz", hash = "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0"},
]
pydantic = [
    {file = "pydantic-1.8.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:05ddfd37c1720c392f4e0d43c484217b7521558302e7069ce8d318438d297739"},
    {file = "pydantic-1.8.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:a7c6002203fe2c5a1b5cbb141bb85060cbff88c2d78eccbc72d97eb7022c43e4"},
    {file = "pydantic-1.8.2-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:589eb6cd6361e8ac341db97602eb7f354551482368a37f4fd086c0733548308e"},
    {file = "pydantic-1.8.2-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:10e5622224245941efc193ad1d159887872776df7a8fd592ed746aa25d071840"},
    {file = "pydantic-1.8.2-cp36-cp36m-win_amd64.whl", hash = "sha256:99a9fc39470010c45c161a1dc584997f1feb13f689ecf645f59bb4ba623e586b"},
    {file = "pydantic-1.8.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a83db7205f60c6a86f2c44a61791d993dff4b73135df1973ecd9eed5ea0bda20"},
    {file = "pydantic-1.8.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:41b542c0b3c42dc17da70554bc6f38cbc30d7066d2c2815a94499b5684582ecb"},
    {file = "pydantic-1.8.2-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:ea5cb40a3b23b3265f6325727ddfc45141b08ed665458be8c6285e7b85bd73a1"},
    {file = "pydantic-1.8.2-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:18b5ea242dd3e62dbf89b2b0ec9ba6c7b5abaf6af85b95a97b00279f65845a23"},
    {file = "pydantic-1.8.2-cp37-cp37m-win_amd64.whl", hash = "sha256:234a6c19f1c14e25e362cb05c68afb7f183eb931dd3cd4605eafff055ebbf287"},
    {file = "pydantic-1.8.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:021ea0e4133e8c824775a0cfe098677acf6fa5a3cbf9206a376eed3fc09302cd"},
    {file = "pydantic-1.8.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:e710876437bc07bd414ff453ac8ec63d219e7690128d925c6e82889d674bb505"},
    {file = "pydantic-1.8.2-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:ac8eed4ca3bd3aadc58a13c2aa93cd8a884bcf21cb019f8cfecaae3b6ce3746e"},
    {file = "pydantic-1.8.2-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:4a03cbbe743e9c7247ceae6f0d8898f7a64bb65800a45cbdc52d65e370570820"},
    {file = "pydantic-1.8.2-cp38-cp38-win_amd64.whl", hash = "sha256:8621559dcf5afacf0069ed194278f35c255dc1a1385c28b32dd6c110fd6531b3"},
    {file = "pydantic-1.8.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8b223557f9510cf0bfd8b01316bf6dd281cf41826607eada99662f5e4963f316"},
    {file = "pydantic-1.8.2-cp39-cp39-manylinux1_i686.whl", hash = "sha256:244ad78eeb388a43b0c927e74d3af78008e944074b7d0f4f696ddd5b2af43c62"},
    {file = "pydantic-1.8.2-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:05ef5246a7ffd2ce12a619cbb29f3307b7c4509307b1b49f456657b43529dc6f"},
    {file = "pydantic-1.8.2-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:54cd5121383f4a461ff7644c7ca20c0419d58052db70d8791eacbbe31528916b"},
    {file = "pydantic-1.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:4be75bebf676a5f0f87937c6ddb061fa39cbea067240d98e298508c1bda6f3f3"},
    {file = "pydantic-1.8.2-py3-none-any.whl", hash = "sha256:fec866a0b59f372b7e776f2d7308511784dace622e0992a0b59ea3ccee0ae833"},
    {file = "pydantic-1.8.2.tar.gz", hash = "sha256:26464e57ccaafe72b7ad156fdaa4e9b9ef051f69e175dbbb463283000c05ab7b"},
]
pyjwt = [
    {file = "PyJWT-2.2.0-py3-none-any.whl", hash = "sha256:b0ed5824c8ecc5362e540c65dc6247567db130c4226670bf7699aec92fb4dae1"},
    {file = "PyJWT-2.2.0.tar.gz", hash = "sha256:a0b9a3b4e5ca5517cac9f1a6e9cd30bf1aa80be74fcdf4e28eded582ecfcfbae"},
]
python-dateutil = [
    {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
    {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
]
python-frontmatter = [
    {file = "python-frontmatter-1.0.0.tar.gz", hash = "sha256:e98152e977225ddafea6f01f40b4b0f1de175766322004c826ca99842d19a7cd"},
    {file = "python_frontmatter-1.0.0-py3-none-any.whl", hash = "sha256:766ae75f1b301ffc5fe3494339147e0fd80bc3deff3d7590a93991978b579b08"},
]
pyyaml = [
    {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"},
    {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"},
    {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"},
    {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"},
    {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"},
    {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"},
    {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"},
    {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"},
    {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"},
    {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"},
    {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"},
    {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"},
    {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"},
    {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"},
    {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"},
    {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"},
    {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"},
    {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"},
    {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"},
    {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"},
    {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"},
    {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"},
    {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"},
    {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"},
    {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"},
    {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"},
    {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"},
    {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"},
    {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"},
    {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"},
    {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"},
    {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"},
    {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"},
]
requests = [
    {file = "requests-2.26.0-py2.py3-none-any.whl", hash = "sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24"},
    {file = "requests-2.26.0.tar.gz", hash = "sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7"},
]
rfc3986 = [
    {file = "rfc3986-1.5.0-py2.py3-none-any.whl", hash = "sha256:a86d6e1f5b1dc238b218b012df0aa79409667bb209e58da56d0b94704e712a97"},
    {file = "rfc3986-1.5.0.tar.gz", hash = "sha256:270aaf10d87d0d4e095063c65bf3ddbc6ee3d0b226328ce21e036f946e421835"},
]
six = [
    {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
    {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
]
sniffio = [
    {file = "sniffio-1.2.0-py3-none-any.whl", hash = "sha256:471b71698eac1c2112a40ce2752bb2f4a4814c22a54a3eed3676bc0f5ca9f663"},
    {file = "sniffio-1.2.0.tar.gz", hash = "sha256:c4666eecec1d3f50960c6bdf61ab7bc350648da6c126e3cf6898d8cd4ddcd3de"},
]
soupsieve = [
    {file = "soupsieve-2.2.1-py3-none-any.whl", hash = "sha256:c2c1c2d44f158cdbddab7824a9af8c4f83c76b1e23e049479aa432feb6c4c23b"},
    {file = "soupsieve-2.2.1.tar.gz", hash = "sha256:052774848f448cf19c7e959adf5566904d525f33a3f8b6ba6f6f8f26ec7de0cc"},
]
sqlalchemy = [
    {file = "SQLAlchemy-1.4.25-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:a36ea43919e51b0de0c0bc52bcfdad7683f6ea9fb81b340cdabb9df0e045e0f7"},
    {file = "SQLAlchemy-1.4.25-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:75cd5d48389a7635393ff5a9214b90695c06b3d74912109c3b00ce7392b69c6c"},
    {file = "SQLAlchemy-1.4.25-cp27-cp27m-win32.whl", hash = "sha256:16ef07e102d2d4f974ba9b0d4ac46345a411ad20ad988b3654d59ff08e553b1c"},
    {file = "SQLAlchemy-1.4.25-cp27-cp27m-win_amd64.whl", hash = "sha256:a79abdb404d9256afb8aeaa0d3a4bc7d3b6d8b66103d8b0f2f91febd3909976e"},
    {file = "SQLAlchemy-1.4.25-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7ad59e2e16578b6c1a2873e4888134112365605b08a6067dd91e899e026efa1c"},
    {file = "SQLAlchemy-1.4.25-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:a505ecc0642f52e7c65afb02cc6181377d833b7df0994ecde15943b18d0fa89c"},
    {file = "SQLAlchemy-1.4.25-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a28fe28c359835f3be20c89efd517b35e8f97dbb2ca09c6cf0d9ac07f62d7ef6"},
    {file = "SQLAlchemy-1.4.25-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:41a916d815a3a23cb7fff8d11ad0c9b93369ac074e91e428075e088fe57d5358"},
    {file = "SQLAlchemy-1.4.25-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:842c49dd584aedd75c2ee05f6c950730c3ffcddd21c5824ed0f820808387e1e3"},
    {file = "SQLAlchemy-1.4.25-cp36-cp36m-win32.whl", hash = "sha256:6b602e3351f59f3999e9fb8b87e5b95cb2faab6a6ecdb482382ac6fdfbee5266"},
    {file = "SQLAlchemy-1.4.25-cp36-cp36m-win_amd64.whl", hash = "sha256:6400b22e4e41cc27623a9a75630b7719579cd9a3a2027bcf16ad5aaa9a7806c0"},
    {file = "SQLAlchemy-1.4.25-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:dd4ed12a775f2cde4519f4267d3601990a97d8ecde5c944ab06bfd6e8e8ea177"},
    {file = "SQLAlchemy-1.4.25-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b7778a205f956755e05721eebf9f11a6ac18b2409bff5db53ce5fe7ede79831"},
    {file = "SQLAlchemy-1.4.25-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:08d9396a2a38e672133266b31ed39b2b1f2b5ec712b5bff5e08033970563316a"},
    {file = "SQLAlchemy-1.4.25-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e93978993a2ad0af43f132be3ea8805f56b2f2cd223403ec28d3e7d5c6d39ed1"},
    {file = "SQLAlchemy-1.4.25-cp37-cp37m-win32.whl", hash = "sha256:0566a6e90951590c0307c75f9176597c88ef4be2724958ca1d28e8ae05ec8822"},
    {file = "SQLAlchemy-1.4.25-cp37-cp37m-win_amd64.whl", hash = "sha256:0b08a53e40b34205acfeb5328b832f44437956d673a6c09fce55c66ab0e54916"},
    {file = "SQLAlchemy-1.4.25-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:33a1e86abad782e90976de36150d910748b58e02cd7d35680d441f9a76806c18"},
    {file = "SQLAlchemy-1.4.25-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ed67aae8cde4d32aacbdba4f7f38183d14443b714498eada5e5a7a37769c0b7"},
    {file = "SQLAlchemy-1.4.25-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1ebd69365717becaa1b618220a3df97f7c08aa68e759491de516d1c3667bba54"},
    {file = "SQLAlchemy-1.4.25-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26b0cd2d5c7ea96d3230cb20acac3d89de3b593339c1447b4d64bfcf4eac1110"},
    {file = "SQLAlchemy-1.4.25-cp38-cp38-win32.whl", hash = "sha256:c211e8ec81522ce87b0b39f0cf0712c998d4305a030459a0e115a2b3dc71598f"},
    {file = "SQLAlchemy-1.4.25-cp38-cp38-win_amd64.whl", hash = "sha256:9a1df8c93a0dd9cef0839917f0c6c49f46c75810cf8852be49884da4a7de3c59"},
    {file = "SQLAlchemy-1.4.25-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:1b38db2417b9f7005d6ceba7ce2a526bf10e3f6f635c0f163e6ed6a42b5b62b2"},
    {file = "SQLAlchemy-1.4.25-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e37621b37c73b034997b5116678862f38ee70e5a054821c7b19d0e55df270dec"},
    {file = "SQLAlchemy-1.4.25-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:91cd87d1de0111eaca11ccc3d31af441c753fa2bc22df72e5009cfb0a1af5b03"},
    {file = "SQLAlchemy-1.4.25-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90fe429285b171bcc252e21515703bdc2a4721008d1f13aa5b7150336f8a8493"},
    {file = "SQLAlchemy-1.4.25-cp39-cp39-win32.whl", hash = "sha256:6003771ea597346ab1e97f2f58405c6cacbf6a308af3d28a9201a643c0ac7bb3"},
    {file = "SQLAlchemy-1.4.25-cp39-cp39-win_amd64.whl", hash = "sha256:9ebe49c3960aa2219292ea2e5df6acdc425fc828f2f3d50b4cfae1692bcb5f02"},
    {file = "SQLAlchemy-1.4.25.tar.gz", hash = "sha256:1adf3d25e2e33afbcd48cfad8076f9378793be43e7fec3e4334306cac6bec138"},
]
starlette = [
    {file = "starlette-0.14.2-py3-none-any.whl", hash = "sha256:3c8e48e52736b3161e34c9f0e8153b4f32ec5d8995a3ee1d59410d92f75162ed"},
    {file = "starlette-0.14.2.tar.gz", hash = "sha256:7d49f4a27f8742262ef1470608c59ddbc66baf37c148e938c7038e6bc7a998aa"},
]
transliterate = [
    {file = "transliterate-1.10.2-py2.py3-none-any.whl", hash = "sha256:010a5021bf6021689c4fade0985f3f7b3db1f2f16a48a09a56797f171c08ed42"},
    {file = "transliterate-1.10.2.tar.gz", hash = "sha256:bc608e0d48e687db9c2b1d7ea7c381afe0d1849cad216087d8e03d8d06a57c85"},
]
typing-extensions = [
    {file = "typing_extensions-3.10.0.2-py2-none-any.whl", hash = "sha256:d8226d10bc02a29bcc81df19a26e56a9647f8b0a6d4a83924139f4a8b01f17b7"},
    {file = "typing_extensions-3.10.0.2-py3-none-any.whl", hash = "sha256:f1d25edafde516b146ecd0613dabcc61409817af4766fbbcfb8d1ad4ec441a34"},
    {file = "typing_extensions-3.10.0.2.tar.gz", hash = "sha256:49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e"},
]
urllib3 = [
    {file = "urllib3-1.26.7-py2.py3-none-any.whl", hash = "sha256:c4fdf4019605b6e5423637e01bc9fe4daef873709a7973e195ceba0a62bbc844"},
    {file = "urllib3-1.26.7.tar.gz", hash = "sha256:4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece"},
]
uvicorn = [
    {file = "uvicorn-0.15.0-py3-none-any.whl", hash = "sha256:17f898c64c71a2640514d4089da2689e5db1ce5d4086c2d53699bf99513421c1"},
    {file = "uvicorn-0.15.0.tar.gz", hash = "sha256:d9a3c0dd1ca86728d3e235182683b4cf94cd53a867c288eaeca80ee781b2caff"},
]
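
The sha256 hashes above let Poetry verify every artifact it downloads; running `poetry install` against this lock file installs exactly these pinned versions.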
34
pyproject.toml
Normal file
@@ -0,0 +1,34 @@
[tool.poetry]
name = "api"
version = "0.1.0"
description = ""
authors = ["Discours DevTeam <dev@discours.io>"]
license = "MIT"

[tool.poetry.dependencies]
python = "^3.9"
html2text = "*"
aioredis = "*"
ariadne = "*"
PyJWT = "*"
starlette = "*"
SQLAlchemy = "*"
uvicorn = "*"
pydantic = "*"
passlib = "*"
itsdangerous = "*"
httpx = "*"
psycopg2-binary = "*"
Authlib = "*"
bson = "*"
python-frontmatter = "*"
bs4 = "*"
transliterate = "*"
psycopg2 = "*"
requests = "*"

[tool.poetry.dev-dependencies]

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
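
html2text is the dependency this commit's message calls out. A minimal sketch of how the migration could use it to turn legacy HTML article bodies into markdown; the function name and settings below are illustrative assumptions, not code from this commit:

import html2text

converter = html2text.HTML2Text()
converter.body_width = 0         # don't hard-wrap the generated markdown
converter.ignore_images = False  # keep inline images so migrate.py can rewrite them

def html_body_to_markdown(html_body):
    # HTML2Text.handle() returns the markdown rendering of an HTML string
    return converter.handle(html_body)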
@@ -192,6 +192,7 @@ type Comment {
 replyTo: Int!
 createdAt: DateTime!
 updatedAt: DateTime
+updatedBy: Int
 shout: Int!
 deletedAt: DateTime
 deletedBy: Int
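
The hunk above adds an updatedBy field to the Comment type. A hedged sketch of a matching resolver with Ariadne (which the pyproject adds); the updated_by attribute on the model is an assumption for illustration:

from ariadne import ObjectType

comment = ObjectType("Comment")

@comment.field("updatedBy")
def resolve_updated_by(obj, info):
    # obj is the Comment record; updated_by is an assumed column name
    return obj.updated_by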