From be794d825cff203f3ca943eba7b2e37de9b60b56 Mon Sep 17 00:00:00 2001
From: root
Date: Sun, 13 Dec 2015 18:00:56 -0800
Subject: Initial import

---
 audit_acl.sh     |  17 +++++
 backup           | 207 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 ddns.py          |  79 +++++++++++++++++++++
 run.sh           |  51 ++++++++++++++
 ssha_password.py |  19 +++++
 5 files changed, 373 insertions(+)
 create mode 100755 audit_acl.sh
 create mode 100755 backup
 create mode 100755 ddns.py
 create mode 100755 run.sh
 create mode 100755 ssha_password.py

diff --git a/audit_acl.sh b/audit_acl.sh
new file mode 100755
index 0000000..e1bbce0
--- /dev/null
+++ b/audit_acl.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Print out page names and their ACLs for all ACL'd pages
+#
+
+ROOT="$HOME/wiki-data/data/pages"
+
+for page in "$ROOT"/*; do
+    current_rev="$page/revisions/`cat $page/current 2>/dev/null`"
+    page_name=`basename "$page" | sed 's#(2f)#/#g'`
+    if [ -f "$current_rev" ]; then
+        output=`grep '#acl' "$current_rev"`
+        if [[ $? == 0 ]]; then
+            echo -e "\033[1;31m$page_name:\033[0m $output"
+        fi
+    fi
+done
diff --git a/backup b/backup
new file mode 100755
index 0000000..8d0bd5d
--- /dev/null
+++ b/backup
@@ -0,0 +1,207 @@
+#!/usr/bin/env python
+
+import os
+import sys
+import math
+import logging
+import subprocess
+from glob import glob
+from tempfile import mkstemp
+from datetime import datetime
+from multiprocessing import Pool
+from collections import namedtuple
+from boto.s3.connection import S3Connection
+from ConfigParser import SafeConfigParser
+
+logging.basicConfig(level=logging.DEBUG,
+                    format="%(asctime)s %(name)s[%(levelname)s]: %(message)s")
+logger = logging.getLogger("backups")
+
+# Boto is very noisy
+logging.getLogger('boto').setLevel(logging.ERROR)
+
+BucketId = namedtuple('BucketId', ('key', 'secret', 'name'))
+FIVE_MB = 5242880
+DATE_FORMAT = "%Y%m%d%H%M%S"
+UPLOAD_PROCESSES = 8
+
+
+def get_bucket(bucketid):
+    conn = S3Connection(bucketid.key, bucketid.secret)
+    return conn.get_bucket(bucketid.name)
+
+
+def get_upload(bucket, multipart_id):
+    for mp in bucket.get_all_multipart_uploads():
+        if mp.id == multipart_id:
+            return mp
+
+
+def _upload_part(bucketid, multipart_id, part_num, source_path, bytes):
+    amount_of_retries = 5
+
+    while amount_of_retries > 0:
+        try:
+            mp = get_upload(get_bucket(bucketid), multipart_id)
+
+            with open(source_path, "r") as fp:
+                fp.seek((part_num - 1) * FIVE_MB)
+                mp.upload_part_from_file(fp=fp, part_num=part_num, size=bytes)
+
+            return True
+        except Exception, exc:
+            amount_of_retries -= 1
+            logger.warn("Upload part %s of %r failed, %s retries remaining",
+                        part_num, source_path, amount_of_retries)
+
+    return False
+
+
+def upload(bucketid, keyname, source_path):
+    bucket = get_bucket(bucketid)
+    mp = bucket.initiate_multipart_upload(keyname)
+    pool = Pool(processes=UPLOAD_PROCESSES)
+
+    results = []
+
+    i = 0
+    remaining = os.stat(source_path).st_size
+    total = int(math.ceil(remaining / float(FIVE_MB)))
+    logger.debug("Uploading in %d chunks", total)
+    while remaining > 0:
+        i += 1
+        bytes = min([FIVE_MB, remaining])
+        remaining -= bytes
+        results.append(pool.apply_async(
+            _upload_part, [bucketid, mp.id, i, source_path, bytes]))
+
+    pool.close()
+    pool.join()
+
+    if all([result.get(1) for result in results]):
+        logger.info("Upload succeeded")
+        mp.complete_upload()
+        bucket.get_key(keyname).set_acl('private')
+        return 0
+    else:
+        logger.error("Upload failed, removing parts")
+        mp.cancel_upload()
+        return 1
+
+
+class ConfigParser(SafeConfigParser):
+
+    def __init__(self, filename):
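+        # Each job's config is a single-section INI file: the section name
+        # doubles as the path to back up (see the `path` property below),
+        # and its options are exposed as attributes via __getattr__.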
+        SafeConfigParser.__init__(self)
+
+        with open(filename, "r") as fp:
+            self.readfp(fp)
+
+        self.primary_section = self.sections()[0]
+
+    def __getattr__(self, key):
+        if key == "mark_caches":
+            return self.getboolean(self.primary_section, key)
+        elif key == "keep":
+            return self.getint(self.primary_section, key)
+        else:
+            return self.get(self.primary_section, key)
+
+    @property
+    def path(self):
+        return self.primary_section
+
+    @property
+    def bucket_id(self):
+        return BucketId(self.access_key, self.secret_key, self.bucket)
+
+
+def get_file_date(filename):
+    return datetime.strptime(filename.split("-")[-1], "%Y%m%d%H%M%S")
+
+
+def backup_comparator(lhs, rhs):
+    return cmp(get_file_date(rhs.name), get_file_date(lhs.name))
+
+
+def trim_backups(bucket_id, prefix, max_items=3):
+    items = list(get_bucket(bucket_id).list(prefix))
+    items.sort(backup_comparator)
+
+    for item in items[max_items:]:
+        logger.info("Pruning backup %s", item.name)
+        item.delete()
+
+
+def mark_caches(path):
+    cmd = ("find '{path}' -type d -name 'cache' "
+           "-exec bash -c ""'echo "
+           "\"Signature: 8a477f597d28d172789f06886806bc55\" >"
+           " \"{{}}/CACHEDIR.TAG\"' \;")
+    subprocess.call(cmd.format(path=path), shell=True)
+
+
+def tar_encrypt(now, key, path, tempdir="/srv/tmp"):
+    old_umask = os.umask(0200)
+
+    key_file = mkstemp(dir=tempdir)
+    archive_file = mkstemp(dir=tempdir)
+
+    logger.debug("Key file %s", key_file[1])
+    logger.debug("Archive file %s", archive_file[1])
+
+    tar_cmd = ("tar", "-c", "-C", path, "--exclude-caches", ".")
+    gpg_cmd = ("gpg", "-c", "-q", "--no-use-agent", "--batch", "--yes",
+               "--s2k-count", "1024", "--cipher-algo", "AES256",
+               "--digest-algo", "SHA512", "--passphrase-file",
+               key_file[1], "-o", archive_file[1])
+
+    try:
+        os.write(key_file[0], "{}{}".format(key, now))
+
+        tar = subprocess.Popen(tar_cmd, stdout=subprocess.PIPE)
+        gpg = subprocess.Popen(gpg_cmd, stdin=tar.stdout)
+        gpg.communicate()
+    finally:
+        os.unlink(key_file[1])
+        os.umask(old_umask)
+
+    return archive_file[1]
+
+
+def do_backup(cfg_file):
+    cfg = ConfigParser(cfg_file)
+    now = datetime.now().strftime(DATE_FORMAT)
+
+    logger.info("Starting backup for %s", cfg.primary_section)
+
+    if cfg.mark_caches:
+        logger.info("Marking caches")
+        mark_caches(cfg.path)
+
+    logger.info("Creating archive")
+    archive_file = tar_encrypt(now, cfg.encryption_key, cfg.path)
+    logger.info("Finished creating archive")
+
+    try:
+        logger.info("Uploading archive")
+        upload(cfg.bucket_id,
+               "{}-{}".format(cfg.filename, now), archive_file)
+        logger.info("Finished uploading archive")
+    finally:
+        os.unlink(archive_file)
+
+    logger.info("Trimming backups")
+    trim_backups(cfg.bucket_id, "{}-".format(cfg.filename), cfg.keep)
+    logger.info("Finished trimming backups")
+
+    logger.info("Backup for %s finished", cfg.primary_section)
+
+
+def do_all_backups(pattern):
+    for config_file in glob(pattern):
+        do_backup(config_file)
+
+
+if __name__ == "__main__":
+    do_all_backups("/srv/etc/backups/*.conf")
diff --git a/ddns.py b/ddns.py
new file mode 100755
index 0000000..5ec92da
--- /dev/null
+++ b/ddns.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+
+import os
+import flask
+import hashlib
+import functools
+from boto.route53.record import ResourceRecordSets
+from boto.route53.connection import Route53Connection
+
+
+app = flask.Flask(__name__)
+app.config.from_pyfile("/etc/ddns.cfg", silent=True)
+app.config.from_pyfile("ddns.cfg", silent=True)
+if "AMAZON_KEY_ID" not in app.config:
+    raise Exception("Not configured")
+
+
+def returns_plain_text(f):
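+    # Decorator: wrap a view function so its return value is served with a
+    # text/plain content type instead of Flask's default text/html.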
+    @functools.wraps(f)
+    def wrapper(*args, **kwargs):
+        return flask.Response(f(*args, **kwargs), content_type="text/plain")
+
+    return wrapper
+
+
+def get_ip():
+    if "X-Forwarded-For" in flask.request.headers:
+        return flask.request.headers["X-Forwarded-For"]
+    else:
+        return flask.request.remote_addr
+
+
+def update_record(zone, record, ip):
+    conn = Route53Connection(app.config["AMAZON_KEY_ID"],
+                             app.config["AMAZON_SECRET_KEY"])
+    change_set = ResourceRecordSets(conn, conn.get_zone(zone).id)
+    change_set.add_change("UPSERT", record, type="A", ttl=60).add_value(ip)
+    change_set.commit()
+
+
+@app.errorhandler(404)
+@app.errorhandler(405)
+@app.errorhandler(500)
+def handle_error(ex):
+    response = flask.Response("Error", content_type="text/plain")
+    response.status_code = getattr(ex, "code", 500)
+    return response
+
+
+@app.route("/new-secret", methods=["GET"])
+@returns_plain_text
+def new_secret():
+    return hashlib.sha256(os.urandom(100)).hexdigest()
+
+
+@app.route("/update", methods=["POST"])
+def update_ip():
+    key = flask.request.form.get("key")
+    config = app.config["CLIENTS"].get(key)
+
+    if not config:
+        flask.abort(404)
+
+    try:
+        update_record(config["zone"], config["resource"], get_ip())
+        return "OK"
+    except:
+        flask.abort(500)
+
+
+@app.route("/", methods=["GET"])
+@returns_plain_text
+def handle_home():
+    return get_ip()
+
+
+if __name__ == "__main__":
+    app.debug = True
+    app.run()
diff --git a/run.sh b/run.sh
new file mode 100755
index 0000000..32ad77f
--- /dev/null
+++ b/run.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+export PYTHONPATH=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
+
+source $(dirname "$PYTHONPATH")/venv/bin/activate
+
+WORKERS=2
+BACKUP_KEY="$HOME/.private/backupkey.pem"
+BIND_PATH="127.0.0.1:9000"
+[[ -z "$2" ]] || BIND_PATH="$2"
+
+MAX_KEEP=`python -c "import wikiconfig; print wikiconfig.Config.backup_set_keep" 2>/dev/null`
+S3_BUCKET=`python -c "import wikiconfig; print wikiconfig.Config.backup_s3_bucket" 2>/dev/null`
+
+APPLICATION="MoinMoin.wsgiapp:application"
+[[ -z "$DEBUG" ]] || APPLICATION="wsgi:application"
+
+if [[ "$1" == "serve" ]]; then
+    gunicorn --timeout=300 --preload -w $WORKERS -b $BIND_PATH $APPLICATION
+elif [[ "$1" == "cron" ]]; then
+    moin maint makecache
+elif [[ "$1" == "reindex" ]]; then
+    moin index build --mode=rebuild
+elif [[ "$1" == "backup" ]]; then
+    if [[ -z "$S3_BUCKET" || -z "$MAX_KEEP" || ! -r $BACKUP_KEY ]]; then
+        echo "Not properly configured"
+        exit 1
+    fi
+
+    FILENAME="$HOME/backup-`date +%Y%m%d%H%M%S`.bak"
+
+    # Backup first
+    #
+    # Place CACHEDIR.TAG files in cache directories to prevent backup.
+    # This is needed because thumbnail images in caches can cause lots of
+    # wasted backup space.
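+    #
+    # The "Signature:" line written below is the standard CACHEDIR.TAG
+    # marker, which the `tar --exclude-caches` call a few lines down
+    # recognizes and skips.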
+    find "$HOME/wiki-data/" -type d -name 'cache' -exec bash -c 'echo "Signature: 8a477f597d28d172789f06886806bc55" > "{}/CACHEDIR.TAG"' \;
+
+    ( cd $HOME && tar -cj --exclude-caches wiki-data/ | openssl aes-256-cbc -pass "file:$BACKUP_KEY" > $FILENAME )
+    s3cmd put --no-progress $FILENAME $S3_BUCKET
+    rm $FILENAME
+
+    # Then prune the backup set to a maximum of N files
+    ALL_FILES=( $(s3cmd ls $S3_BUCKET | awk '{ print $4 }' | sort -nr) )
+
+    for (( i=$MAX_KEEP ; i < ${#ALL_FILES[@]}; i++ )); do
+        s3cmd del ${ALL_FILES[i]}
+    done
+else
+    moin "$@"
+fi
diff --git a/ssha_password.py b/ssha_password.py
new file mode 100755
index 0000000..adfde45
--- /dev/null
+++ b/ssha_password.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+import hashlib
+import os
+
+
+def make_secret(password):
+    salt = os.urandom(4)
+
+    sha = hashlib.sha1(password)
+    sha.update(salt)
+
+    digest_salt_b64 = '{}{}'.format(sha.digest(), salt).encode('base64').strip()
+
+    return '{{SSHA}}{}'.format(digest_salt_b64)
+
+
+if __name__ == '__main__':
+    print make_secret("")
--
cgit v1.2.3