# wiki_upload_test.py -- prototype for mirroring a MoinMoin wiki as a
# static site.  Reconstructed from a collapsed git patch; depends on the
# personal ``viwiki`` script (loaded below) and a MoinMoin XML-RPC wiki.


def get_wiki(path):
    """Load the personal ``viwiki`` script as a module named ``wiki``.

    Uses ``imp`` because the file targets Python 2/3 compatibility
    (see the StringIO fallback below).  NOTE(review): ``imp`` is
    deprecated and removed in Python 3.12 -- migrate to ``importlib``
    when dropping Python 2.
    """
    import imp, os.path
    return imp.load_module('wiki', open(os.path.expanduser(path)), path,
        [s for s in imp.get_suffixes() if s[0] == '.py'][0])


# Module-level side effect: everything below needs this import to work.
wiki = get_wiki('~/bin/viwiki')


import re

try:
    from io import StringIO          # Python 3
except ImportError:
    from StringIO import StringIO    # Python 2


class RawWikiWrapper(wiki.WikiAPI):
    """Thin 1:1 wrapper over the wiki's XML-RPC methods.

    ``wiki.WikiAPI`` instances are callable: ``self(method, *args)``
    performs the RPC call.
    """

    def __init__(self, profile):
        # Credentials are looked up by profile name in the viwiki config.
        super(RawWikiWrapper, self).__init__(*wiki.get_credentials(profile))

    def list_pages_by_prefix(self, prefix):
        return self("getAllPagesEx", {"prefix": prefix})

    def list_attachments(self, page):
        return self("listAttachments", page)

    def get_attachment(self, page, attachment):
        return self("getAttachment", page, attachment)

    def get_page(self, page):
        return self("getPage", page)

    def get_page_info(self, page):
        return self("getPageInfo", page)

    def get_processing_instruction(self, page, pi):
        return self("getProcessingInstruction", page, pi)


class WikiWrapper(RawWikiWrapper):
    """Wrapper returning rich ``WikiPage`` objects instead of raw data."""

    def __init__(self, *args):
        super(WikiWrapper, self).__init__(*args)
        # Hand pages the *raw* API so their internal calls bypass the
        # WikiPage-wrapping layer and get plain strings/lists back.
        self._api = super(WikiWrapper, self)

    def list_pages_by_prefix(self, prefix):
        return [
            WikiPage(name, self._api)
            for name in super(WikiWrapper, self).list_pages_by_prefix(prefix)
        ]

    def get_page(self, page):
        return WikiPage(page, self._api)


class WikiPage(object):
    """A wiki page whose contents and metadata are fetched lazily.

    :param name: full wiki page name, e.g. ``"Site/Talks/Foo"``.
    :param api: a raw API object (``RawWikiWrapper``-compatible).
    :param contents: optional pre-fetched raw page text.
    """

    def __init__(self, name, api, contents=None):
        self.name = name
        self._api = api
        self._contents = contents
        self._meta = None   # page-info dict, fetched on first access
        self._pis = None    # processing instructions parsed from contents

    def __repr__(self):
        return "{}({!r}, {!r})".format(
            self.__class__.__name__, self.name, self._api)

    def attachments(self):
        return [
            # BUG FIX: the original passed ``self`` (this page) as the
            # attachment's api; WikiPage has no get_attachment(), so
            # WikiAttachment.get_contents() crashed.  Pass the real API.
            WikiAttachment(self.name, filename, self._api)
            for filename in self._api.list_attachments(self.name)
        ]

    @staticmethod
    def _parse_pi(line):
        """Parse one leading ``#...`` processing-instruction line.

        Returns ``(name, args)`` -- args is the raw string for
        ``pragma``, a word list otherwise -- or ``None`` for plain
        comment lines (``## ...``).
        """
        line = line[1:].strip()
        pi_name, args = line.split(" ", 1)  # renamed: ``type`` shadowed builtin

        if pi_name == "pragma":
            return pi_name, args
        elif pi_name == "#":
            return None
        else:
            return pi_name, args.split(" ")

    def _parse(self, contents):
        """Split raw page text into ``(body, processing_instructions)``.

        Leading ``#`` lines are processing instructions; everything from
        the first non-``#`` line onward is body text (including later
        ``#`` lines).
        """
        buffer = StringIO()
        have_body = False
        pis = {}

        for line in contents.split("\n"):
            if not have_body and line.startswith("#"):
                res = self._parse_pi(line)
                if res:
                    pis[res[0]] = res[1]
            else:
                # BUG FIX: the original set ``self.have_body`` here, so
                # the local flag never flipped and ``#`` lines inside the
                # body were eaten as processing instructions forever.
                have_body = True
                buffer.write(u"{}\n".format(line))

        return buffer.getvalue(), pis

    @property
    def contents(self):
        # ``is None`` (not truthiness) so an empty page is cached and
        # not re-fetched on every access.
        if self._contents is None:
            self._contents, self._pis = self._parse(
                self._api.get_page(self.name))
        elif self._pis is None:
            # Contents were supplied to the constructor; parse them so
            # processing instructions (e.g. ``format``) are available.
            self._contents, self._pis = self._parse(self._contents)
        return self._contents

    @property
    def meta(self):
        if self._meta is None:
            self._meta = self._api.get_page_info(self.name)
        return self._meta

    @property
    def last_modified(self):
        return self.meta["lastModified"]

    @property
    def version(self):
        return self.meta["version"]

    @property
    def author(self):
        # Drop the interwiki prefix (e.g. "Self:UserName" -> "UserName").
        return self.meta["author"].split(":")[-1]

    @property
    def format(self):
        # BUG FIX: force a parse first -- ``self._pis`` is only populated
        # by ``contents``; the original raised AttributeError if this
        # property was read before the page body.
        self.contents
        return self._pis.get("format", ["rst"])[0]


class WikiAttachment(object):
    """A single attachment on a page; contents are fetched on demand."""

    def __init__(self, page, filename, api):
        self.page = page
        self.filename = filename
        self._api = api

    def __repr__(self):
        return "{}({!r}, {!r}, {!r})".format(
            self.__class__.__name__, self.page, self.filename, self._api)

    def get_contents(self):
        return self._api.get_attachment(self.page, self.filename)


class PageTree(object):
    """Placeholder for the rendered site's page tree (prototype stub)."""

    def __init__(self, root_path):
        self.root_path = root_path


class PageNode(object):
    """A node in the page tree wrapping one page and its children."""

    def __init__(self, page=None):
        self.page = page
        self.children = []

    def file_name(self, root_path):
        """Map a CamelCase wiki path to a dashed-lowercase file path."""
        return "/".join([
            re.sub("(.)([A-Z]+)", r"\1-\2", p).lower()
            for p in self.page.name[len(root_path):].split("/")
        ])

# Metadata:
#  Site Title
#  Footer Link (n)
#  Date
#  Author
#  Tag
#  Template (filename)
#  Renderer (Page, Blog)

# Page tree:
#  Keep metadata attributes from parents
#  Add robots (/robots.txt)
#  Add sitemap (/sitemap.xml) (https://www.sitemaps.org/protocol.html)

# Site:
#  Load template
#  Render page
#  Render text-only page
#  Set copyright date

# Home Page:
#  Extract bottom site links
#  Extract page header
#  Extract site title

# All pages:
#  Handle attachments
#  Embed images
#  Rewrite links
#  Extract title
#  Extract author
#  Extract date
#  Extract tags
#  Extract last updated date for footer

# Blog:
#  Full post list page (/blog)
#  By date page (/archive)
#  Tags page (/tags)
#  Atom feed (/feed, /atom)
#  Rss feed (/rss)


if __name__ == "__main__":
    api = WikiWrapper("crute")

    #print(api.get_page("MikeCruteWebsite"))
    #print(api.list_pages_by_prefix("MikeCruteWebsite/"))
    #print(api.list_attachments("MikeCruteWebsite/Talks/ClePyAST"))
    #print(api.get_attachment("MikeCruteWebsite/Talks/ClePyAST", "clepy-python-ast.pdf"))

    page = api.get_page("MikeCruteWebsite/Talks/ClepyAst")

    # Prototype debugging entry point -- drops into the debugger with
    # ``page`` in scope for interactive exploration.
    import pdb; pdb.set_trace(); print("")